| Column | Type | Values |
|---|---|---|
| code | string | lengths 87 to 55.2k |
| code_codestyle | int64 | 0 to 349 |
| style_context | string | lengths 135 to 49.1k |
| style_context_codestyle | int64 | 0 to 349 |
| label | int64 | 0 to 1 |
from __future__ import annotations

from collections import deque


class Automaton:
    """Aho-Corasick automaton for matching a set of keywords in a string."""

    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return a dict mapping each matched keyword to the list of start
        indices of its occurrences in `string`."""
        result: dict[str, list[int]] = {}  # keywords and lists of their occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
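# A minimal usage sketch for the Automaton class above; the keyword list and
# search string are illustrative examples, not part of the original file.
automaton = Automaton(["what", "hat", "ver", "er"])
assert automaton.search_in("whatever, err ... , wherever") == {
    "what": [0],
    "hat": [1],
    "ver": [5, 25],
    "er": [6, 10, 22, 26],
}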
from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    # Used to change the data of a particular node
    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """This section of the test uses varying data types for input."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest

import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed


logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generates a tuple of dummy DataLoaders to test with."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for `num_epochs` and returns the random numbers drawn along the way."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    """Simple model to do y = a * x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b


class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
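# Condensed from the test suite above: the minimal save/restore round trip the
# tests exercise. A sketch, assuming `accelerate` is installed; the helper name
# and the temporary directory are illustrative.
def _checkpoint_round_trip_demo() -> None:
    accelerator = Accelerator()
    model = DummyModel()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    model, optimizer = accelerator.prepare(model, optimizer)
    with tempfile.TemporaryDirectory() as ckpt_dir:
        accelerator.save_state(ckpt_dir)  # writes model, optimizer and RNG state
        accelerator.load_state(ckpt_dir)  # restores that exact state later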
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.sqrt()."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
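# Quick sanity checks for the two predicates above (example values):
assert perfect_square(49) is True
assert perfect_square_binary_search(36) is True
assert perfect_square_binary_search(35) is False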
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation


def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length using `secrets`."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Generate a shuffled password containing the required characters plus a
    mix of letters, digits and punctuation."""
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, You better save it.]")


if __name__ == "__main__":
    main()
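# A non-interactive usage sketch for the helpers above; the length and the
# sample passwords are illustrative values.
assert len(password_generator(12)) == 12
assert is_strong_password("Str0ng!Pass") is True
assert is_strong_password("weak") is False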
from __future__ import annotations


def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a sorted (decreasing) array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
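# A small worked example for the three counting strategies above; this grid is
# the first of the test grids and contains eight negative entries.
example_grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert count_negatives_binary_search(example_grid) == 8
assert count_negatives_brute_force(example_grid) == 8
assert count_negatives_brute_force_with_break(example_grid) == 8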
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with"
                f" Mask2Former. Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2FormerConfig from a pre-trained backbone model configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
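# A usage sketch, assuming the `transformers` library is installed; from
# outside the library you would use the public import path rather than this
# module's relative imports:
#
#     from transformers import Mask2FormerConfig
#
#     config = Mask2FormerConfig(num_queries=100, hidden_dim=256)
#     assert config.to_dict()["model_type"] == "mask2former"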
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
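# A usage sketch, assuming `transformers` and `Pillow` are installed; the image
# path is a placeholder:
#
#     from PIL import Image
#     from transformers import PoolFormerImageProcessor
#
#     processor = PoolFormerImageProcessor(crop_pct=0.9)
#     inputs = processor(Image.open("example.jpg"), return_tensors="pt")
#     # inputs["pixel_values"] has shape (1, 3, 224, 224) with the defaults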
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
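# Deterministic checks for res() alongside the interactive entry point above:
assert res(0, 5) == 0  # 0^5 = 0
assert res(5, 0) == 1  # 5^0 = 1
assert res(2, 10) > res(10, 2)  # 2^10 = 1024 beats 10^2 = 100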
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file> and return summary statistics."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """Evaluate a seq2seq model on a file of inputs; optionally score against references."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
import os
import sys
import tempfile

import torch

from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment


def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launches a training function, using several processes if possible in the
    current environment (for instance a TPU with multiple cores)."""
    # Are we in a Google Colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )

        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e

        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    """Launches a training function using several processes on CPU for debugging purposes."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
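# A usage sketch for notebook_launcher, e.g. from a Jupyter cell; the training
# function and its argument tuple are placeholders for user code:
#
#     from accelerate import notebook_launcher
#
#     def training_function(batch_size):
#         ...  # build the Accelerator *inside* this function, then train
#
#     notebook_launcher(training_function, args=(16,), num_processes=2)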
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits in the factorial of `num`."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
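# A quick check of solution() above: 10! = 3628800, whose digits sum to 27.
assert solution(10) == 27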
"""Whisper model configuration"""

from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class snake_case ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_lowerCamelCase = "whisper"
_lowerCamelCase = ["past_key_values"]
_lowerCamelCase = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , UpperCamelCase=5_1865 , UpperCamelCase=80 , UpperCamelCase=6 , UpperCamelCase=4 , UpperCamelCase=6 , UpperCamelCase=4 , UpperCamelCase=1536 , UpperCamelCase=1536 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=5_0257 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase="gelu" , UpperCamelCase=256 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.02 , UpperCamelCase=False , UpperCamelCase=1500 , UpperCamelCase=448 , UpperCamelCase=5_0256 , UpperCamelCase=5_0256 , UpperCamelCase=5_0256 , UpperCamelCase=None , UpperCamelCase=[220, 5_0256] , UpperCamelCase=False , UpperCamelCase=256 , UpperCamelCase=False , UpperCamelCase=0.05 , UpperCamelCase=10 , UpperCamelCase=2 , UpperCamelCase=0.0 , UpperCamelCase=10 , UpperCamelCase=0 , UpperCamelCase=7 , **UpperCamelCase , ):
"""simple docstring"""
lowerCamelCase_ = vocab_size
lowerCamelCase_ = num_mel_bins
lowerCamelCase_ = d_model
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = encoder_attention_heads
lowerCamelCase_ = decoder_layers
lowerCamelCase_ = decoder_attention_heads
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = activation_function
lowerCamelCase_ = init_std
lowerCamelCase_ = encoder_layerdrop
lowerCamelCase_ = decoder_layerdrop
lowerCamelCase_ = use_cache
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase_ = max_source_positions
lowerCamelCase_ = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
lowerCamelCase_ = classifier_proj_size
lowerCamelCase_ = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase_ = apply_spec_augment
lowerCamelCase_ = mask_time_prob
lowerCamelCase_ = mask_time_length
lowerCamelCase_ = mask_time_min_masks
lowerCamelCase_ = mask_feature_prob
lowerCamelCase_ = mask_feature_length
lowerCamelCase_ = mask_feature_min_masks
lowerCamelCase_ = median_filter_width
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , suppress_tokens=SCREAMING_SNAKE_CASE_ , begin_suppress_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self):
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=-1,
        seq_length=-1,
        is_pair=False,
        framework=None,
        sampling_rate=22050,
        time_duration=5.0,
        frequency=220,
    ):
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
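# Usage sketch (illustrative). Assumption: the classes above correspond to
# transformers' WhisperConfig and WhisperOnnxConfig; the import path below is
# an assumption, not taken from this file.
#
#   from transformers import WhisperConfig
#   from transformers.models.whisper.configuration_whisper import WhisperOnnxConfig
#
#   cfg = WhisperConfig()  # defaults mirror the __init__ signature above
#   onnx_cfg = WhisperOnnxConfig.from_model_config(cfg)
#   print(list(onnx_cfg.inputs))         # ["input_features", "decoder_input_ids"]
#   print(onnx_cfg.atol_for_validation)  # 0.001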
def binary_insertion_sort(collection: list) -> list:
    """Sorts a list in place, using binary search to locate each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
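# Quick self-check (illustrative sketch, not part of the original module):
# each pass binary-searches the already-sorted prefix for the insertion point,
# then shifts the displaced elements one slot to the right.
if __name__ == "__main__":
    assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
    assert binary_insertion_sort([]) == []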
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Prints under an exclusive flock so output from concurrent processes doesn't interleave."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = F'[{hostname}-{local_rank}]'
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F'{gpu} is OK (global rank: {rank}/{world_size})')
dist.barrier()
if rank == 0:
printflock(F'pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}')
except Exception:
printflock(F'{gpu} is broken')
raise
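# Expected healthy output (illustrative; hostnames and version numbers below
# are placeholders, not captured logs): one "is OK" line per process, then a
# rank-0 summary, e.g.
#
#   [node1-0] is OK (global rank: 0/8)
#   [node1-1] is OK (global rank: 1/8)
#   ...
#   pt=2.0.1, cuda=11.8, nccl=(2, 14, 3)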
from __future__ import annotations

from collections import deque


class Automaton:
    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
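# Usage sketch (illustrative): keys in the returned dict are matched keywords,
# values are the start indices of each occurrence in the scanned string.
if __name__ == "__main__":
    auto = Automaton(["what", "hat", "ver", "er"])
    print(auto.search_in("whatever, err ... , wherever"))
    # {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}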
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
END_COMMON = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def rename_state_dict_key(k: str, patterns) -> str:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(i in k for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
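# Example invocation (illustrative; the script name and paths are placeholders):
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird_pegasus_tf_ckpt \
#       --save_dir ./bigbird-pegasus-pt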
import os
import tempfile
import unittest

from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
        DistilBertForTokenClassification,
        DistilBertModel,
    )


class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
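# Quick inference sketch (illustrative; uses the public transformers API rather
# than the test harness above):
#
#   from transformers import DistilBertModel, DistilBertTokenizer
#   tok = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
#   model = DistilBertModel.from_pretrained("distilbert-base-uncased")
#   out = model(**tok("hello world", return_tensors="pt"))
#   print(out.last_hidden_state.shape)  # torch.Size([1, 4, 768])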
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest

import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed


logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    "Simple model to do y=mx+b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b


class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
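# Minimal checkpointing sketch (illustrative; mirrors the pattern exercised by
# the tests above, with "runs" as a placeholder project directory):
#
#   project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
#   accelerator = Accelerator(project_dir="runs", project_config=project_config)
#   model, optimizer = accelerator.prepare(model, optimizer)
#   accelerator.save_state()  # -> runs/checkpoints/checkpoint_0
#   accelerator.load_state("runs/checkpoints/checkpoint_0")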
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node)
                        )

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(self, dataset: Dataset, name: str, con, batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Writes the pyarrow table as SQL to a database. This function is called by `write`."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
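
# Usage sketch (hedged): a minimal round trip through the classes above, assuming
# they are importable as in the `datasets` library. The table name and in-memory
# connection below are illustrative, not taken from this file.
#
# import sqlite3
# from datasets import Dataset
#
# ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
# con = sqlite3.connect(":memory:")
# SqlDatasetWriter(ds, name="my_table", con=con).write()
# reloaded = SqlDatasetReader("SELECT * FROM my_table", con).read()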
| 328 | 0 |
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR returns 1 when both inputs are equal, 0 otherwise."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
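
# Equivalent formulation (a sketch, not part of the original file): for
# single-bit inputs, XNOR is the complement of XOR, so the gate can also
# be written bitwise.
def xnor_gate_bitwise(input_1: int, input_2: int) -> int:
    return 1 - (input_1 ^ input_2)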
| 99 |
def excel_title_to_column(column_title: str) -> int:
    """Converts an Excel-style column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
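
    # Usage sketch (not in the original file): "A" is column 1 and "AB" is
    # 1 * 26 + 2 = 28, since each letter is a base-26 digit offset by ord("A") - 1.
    print(excel_title_to_column("A"))   # 1
    print(excel_title_to_column("AB"))  # 28
    print(excel_title_to_column("ZZ"))  # 702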
| 328 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False,
    proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 312 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result
def benchmark() -> None:
    """Benchmark both triangle generators over a range of row counts."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
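
    # Example (a sketch, not in the original file): print_pascal_triangle(3) prints
    #   1
    #  1 1
    # 1 2 1
    print_pascal_triangle(3)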
| 328 | 0 |
from __future__ import annotations

# marker stored at the end of every inserted word
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
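
    # Sample output (a sketch): each completion carries the trailing " " that
    # `_elements` emits for the END marker, e.g.
    # ('depart ', 'detergent ', 'deer ', 'deal ')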
| 65 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
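
    # Invocation sketch (hedged; the script filename is illustrative):
    #   python convert_unclip_txt2img_to_image_variation.py \
    #       --dump_path ./unclip-image-variation --txt2img_unclip kakaobrain/karlo-v1-alpha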
| 328 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 178 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowercase__ : Union[str, Any] = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
lowercase__ : Optional[Any] = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
lowercase__ : Any = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
lowercase__ : Optional[int] = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
lowercase__ : Optional[Any] = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
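
# Worked example (a sketch, not part of the metric): with n = 4 samples for one
# problem of which c = 2 pass, the unbiased pass@2 estimate is
# 1 - C(n - c, k) / C(n, k) = 1 - C(2, 2) / C(4, 2) = 1 - 1/6 = 5/6.
if __name__ == "__main__":
    print(estimate_pass_at_k(num_samples=4, num_correct=[2], k=2))  # [0.8333...]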
| 328 | 0 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]
def __lowerCamelCase ( lowerCAmelCase_ = "p059_cipher.txt" ) -> int:
_a : Optional[Any] = 42
_a : Optional[Any] = 42
_a : Optional[int] = 42
_a : List[Any] = 42
_a : Any = Path(lowerCAmelCase_ ).parent.joinpath(lowerCAmelCase_ ).read_text(encoding='utf-8' )
_a : str = [int(lowerCAmelCase_ ) for number in data.strip().split(',' )]
_a : List[str] = filter_valid_chars(lowerCAmelCase_ )
for common_word in COMMON_WORDS:
_a : Union[str, Any] = filter_common_word(lowerCAmelCase_ , lowerCAmelCase_ )
if len(lowerCAmelCase_ ) == 1:
break
_a : List[Any] = possibles[0]
return sum(ord(lowerCAmelCase_ ) for char in decoded_text )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 89 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
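
# Usage sketch (hedged): this builder backs `load_dataset("csv", ...)` in the
# `datasets` library; the file path below is illustrative.
#
# from datasets import load_dataset
# ds = load_dataset("csv", data_files="my_data.csv", split="train")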
| 328 | 0 |
import torch
from diffusers import StableDiffusionPipeline
lowerCamelCase_ = "path-to-your-trained-model"
lowerCamelCase_ = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
lowerCamelCase_ = "A photo of sks dog in a bucket"
lowerCamelCase_ = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 244 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Returns the first n odd composite numbers which do not follow the conjecture."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the solution to the problem."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F"{solution() = }")
| 328 | 0 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """Constructs XLNetConfig."""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
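
# Usage sketch (not in the original file): the constructor defaults above match
# the large checkpoint; a base-sized configuration overrides the model dims.
if __name__ == "__main__":
    config = XLNetConfig(d_model=768, n_layer=12, n_head=12)
    print(config.d_head)  # 64 == d_model // n_head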
| 55 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Treats the curve as a collection of linear segments and sums the area of
    the trapezium shapes they form."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0

    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2

        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":
    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
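
    # Cross-check note (hedged): numpy.trapz implements the plain signed
    # composite trapezoid rule, whereas trapezoidal_area takes abs(fx2 + fx1)
    # per panel, so the two only agree where f does not change sign.
    # import numpy as np
    # xs = np.linspace(-5, 5, 10001)
    # print(np.trapz(xs**3 + xs**2, xs))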
| 328 | 0 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__A : Union[str, Any] = ""
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__A : Optional[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
__A : Optional[int] = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
__A : str = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
| 138 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Renames the fairseq MusicGen state dict to HF module names and splits off
    the encoder-decoder projection weights."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
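# Editor's hedged usage sketch (not part of the original conversion script): once a
# checkpoint has been converted, the dump folder can be reloaded for generation. The folder
# name and prompt below are illustrative assumptions; any directory written by
# `save_pretrained` above works. This helper is defined but intentionally never invoked.
def _example_generate_from_converted(dump_folder="./musicgen-small"):
    from transformers import MusicgenForConditionalGeneration, MusicgenProcessor

    model = MusicgenForConditionalGeneration.from_pretrained(dump_folder)
    processor = MusicgenProcessor.from_pretrained(dump_folder)
    inputs = processor(text=["80s pop track with punchy drums"], padding=True, return_tensors="pt")
    # do_sample and guidance_scale were stored as generation defaults during conversion
    return model.generate(**inputs, max_new_tokens=256)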
| 328 | 0 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest ( unittest.TestCase ):
    @parameterized.expand([(None,), ("foo.json",)] )
    def test_save_load_config( self , config_name ):
        '''simple docstring'''
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , config_name=config_name )
            loaded_config = GenerationConfig.from_pretrained(tmp_dir , config_name=config_name )
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample , True )
        self.assertEqual(loaded_config.temperature , 0.7 )
        self.assertEqual(loaded_config.length_penalty , 1.0 )
        self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k , 50 )
        self.assertEqual(loaded_config.max_length , 20 )
        self.assertEqual(loaded_config.max_time , None )
    def test_from_model_config( self ):
        '''simple docstring'''
        model_config = AutoConfig.from_pretrained("gpt2" )
        generation_config_from_model = GenerationConfig.from_model_config(model_config )
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model , default_generation_config )
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
        self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
    def test_update( self ):
        '''simple docstring'''
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs )
        unused_kwargs = generation_config.update(**update_kwargs )
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs , update_kwargs_copy )
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens , 1024 )
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs , {"foo": "bar"} )
    def test_initialize_new_kwargs( self ):
        '''simple docstring'''
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config" ) as tmp_dir:
            generation_config.save_pretrained(tmp_dir )
            new_config = GenerationConfig.from_pretrained(tmp_dir )
        # new kwargs survive a save/load round-trip
        self.assertEqual(new_config.foo , "bar" )
        generation_config = GenerationConfig.from_model_config(new_config )
        assert not hasattr(generation_config , "foo" )  # no new kwargs should be initialized if from config
    def test_kwarg_init( self ):
        '''simple docstring'''
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature , 1.0 )
        self.assertEqual(default_config.do_sample , False )
        self.assertEqual(default_config.num_beams , 1 )
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        self.assertEqual(config.temperature , 0.7 )
        self.assertEqual(config.do_sample , True )
        self.assertEqual(config.num_beams , 1 )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir )
            loaded_config = GenerationConfig.from_pretrained(tmp_dir , temperature=1.0 )
        self.assertEqual(loaded_config.temperature , 1.0 )
        self.assertEqual(loaded_config.do_sample , True )
        self.assertEqual(loaded_config.num_beams , 1 )  # default value
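# Editor's hedged sketch (not part of the test suite): the same save/load round-trip the
# tests above exercise, as a standalone helper using only the modules already imported.
def _generation_config_roundtrip_demo():
    config = GenerationConfig(do_sample=True, temperature=0.7)
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)
        reloaded = GenerationConfig.from_pretrained(tmp_dir)
    assert reloaded.temperature == 0.7 and reloaded.do_sample is True
    return reloaded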
@is_staging_test
class ConfigPushToHubTester ( unittest.TestCase ):
    @classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
    @classmethod
    def tearDownClass( cls ):
        '''simple docstring'''
        try:
            delete_repo(token=cls._token , repo_id="test-generation-config" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org" )
        except HTTPError:
            pass
    def test_push_to_hub( self ):
        '''simple docstring'''
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub("test-generation-config" , use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-generation-config" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id="test-generation-config" , push_to_hub=True , use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
    def test_push_to_hub_in_organization( self ):
        '''simple docstring'''
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id="valid_org/test-generation-config-org" , push_to_hub=True , use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
| 330 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_dataloaders( accelerator: Accelerator , batch_size: int = 16 ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
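# New Code (editor's hedged sketch, not part of the original example): a minimal custom
# tracker satisfying the `GeneralTracker` interface from `accelerate.tracking`. An instance
# of a class like this could be passed to the Accelerator in `training_function` below, e.g.
# `log_with=["all", PrintTracker("my-run")]`. The print-based logging is an illustrative
# assumption, not the example's actual tracking backend.
from accelerate.tracking import GeneralTracker, on_main_process


class PrintTracker(GeneralTracker):
    name = "print"
    requires_logging_directory = False

    @on_main_process
    def __init__(self, run_name: str):
        super().__init__()
        self.run_name = run_name

    @property
    def tracker(self):
        return self.run_name

    @on_main_process
    def store_init_configuration(self, values: dict):
        # Called once with the hyper-parameters passed to `init_trackers`
        print(f"[{self.run_name}] init configuration: {values}")

    @on_main_process
    def log(self, values: dict, step: int = None):
        # Called for every `accelerator.log(...)` invocation
        print(f"[{self.run_name}] step {step}: {values}")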
def training_function( config , args ):
    '''simple docstring'''
    if os.environ.get("TESTING_MOCKED_DATALOADERS" , None ) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, it should be passed in here, such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    # (a minimal `PrintTracker` sketch is defined above)
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
    else:
        accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    set_seed(seed )
    train_dataloader , eval_dataloader = get_dataloaders(accelerator , batch_size )
    metric = evaluate.load("glue" , "mrpc" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__ )[-1].split("." )[0]
        accelerator.init_trackers(run , config )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions , references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:" , eval_metric )
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader ),
                    "epoch": epoch,
                } , step=epoch , )
    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    parser.add_argument(
        "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
    parser.add_argument(
        "--project_dir" , type=str , default="logs" , help="Location on where to store experiment tracking logs` and relevant project information" , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 328 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
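# Editor's hedged usage sketch (not part of the original module): building the processor
# from its two components. The checkpoint names are illustrative assumptions and require
# the corresponding weights to be reachable; the helper is defined but never invoked here.
def _processor_demo():
    from transformers import CLIPImageProcessor, XLMRobertaTokenizerFast

    image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
    tokenizer = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")
    processor = AltCLIPProcessor(image_processor=image_processor, tokenizer=tokenizer)
    # A text-only call returns a BatchEncoding with input_ids/attention_mask
    return processor(text=["a photo of a cat"], return_tensors="pt")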
| 78 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__( self , vocab_size=51865 , num_mel_bins=80 , encoder_layers=6 , encoder_attention_heads=4 , decoder_layers=6 , decoder_attention_heads=4 , decoder_ffn_dim=1536 , encoder_ffn_dim=1536 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , decoder_start_token_id=50257 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=256 , dropout=0.0 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , scale_embedding=False , max_source_positions=1500 , max_target_positions=448 , pad_token_id=50256 , bos_token_id=50256 , eos_token_id=50256 , suppress_tokens=None , begin_suppress_tokens=[220, 50256] , use_weighted_layer_sum=False , classifier_proj_size=256 , apply_spec_augment=False , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , median_filter_width=7 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
class WhisperOnnxConfig( OnnxSeq2SeqConfigWithPast ):
"""simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ] )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
        return common_inputs
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , sampling_rate = 22050 , time_duration = 5.0 , frequency = 220 , ) -> Mapping[str, Any]:
        '''simple docstring'''
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features" )
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids" )
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values" )
        return dummy_inputs
    @property
    def atol_for_validation( self ) -> float:
        '''simple docstring'''
        return 1E-3
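# Editor's hedged usage sketch (not part of the original module): instantiating a small
# variant of the config and its ONNX export config. The override values are illustrative
# assumptions; the helper is defined but never invoked here.
def _whisper_config_demo():
    config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
    onnx_config = WhisperOnnxConfig(config)
    # Without past key values, the export expects input_features and decoder_input_ids
    return config, sorted(onnx_config.inputs.keys())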
| 328 | 0 |
def solution(n: int = 1_0_0 ):
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
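    # Quick check (editor's addition): for n=10 the answer is 55**2 - 385 = 2640.
    assert solution(10) == 2640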
| 277 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class XLNetConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self , vocab_size=32000 , d_model=1024 , n_layer=24 , n_head=16 , d_inner=4096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-12 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.' , FutureWarning , )
            use_mems_eval = kwargs['use_cache']
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings( self ):
        '''simple docstring'''
        logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings( self , value ):
        '''simple docstring'''
        raise NotImplementedError(
            F"The model {self.model_type} is one of the few models that has no sequence length limit." )
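# Editor's hedged usage sketch (not part of the original module): a tiny config for
# experimentation; the override values are illustrative assumptions.
def _xlnet_config_demo():
    config = XLNetConfig(d_model=128, n_layer=2, n_head=4)
    return config.d_head  # derived as d_model // n_head == 32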
| 328 | 0 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k
def _set(k , v):
    return setitem, k, v
def _del(k):
    return delitem, k
def _run_operation(obj , fun , *args):
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res , my_exc = _run_operation(my , fun , *args )
        py_res , py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my ) == str(py )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )
def test_no_new_methods():
    def is_public(name ) -> bool:
        return not name.startswith('_' )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
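# Editor's hedged sketch (not part of the test suite): the HashMap under test mirrors dict
# semantics for item access, deletion, and length, as the property test above exercises.
def _hash_map_demo():
    hm = HashMap(initial_block_size=4)
    hm["key_a"] = "val_a"
    hm["key_b"] = "val_b"
    del hm["key_a"]
    return len(hm), hm["key_b"]  # (1, "val_b")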
| 99 |
from typing import Any
class Node:
    """simple docstring"""
    def __init__( self , data ):
        '''simple docstring'''
        self.data = data
        self.next = None
def __repr__( self )-> str:
'''simple docstring'''
return F"Node({self.data})"
class LinkedList:
    """simple docstring"""
    def __init__( self ):
        '''simple docstring'''
        self.head = None
def __iter__( self )-> Any:
'''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next
def __len__( self )-> int:
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self )-> str:
'''simple docstring'''
        return "->".join([str(item ) for item in self] )
    def __getitem__( self , index ) -> Any:
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('''list index out of range.''' )
for i, node in enumerate(self ):
if i == index:
return node
return None
    def __setitem__( self , index , data ) -> None:
        '''simple docstring'''
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self , data ) -> None:
        '''simple docstring'''
        self.insert_nth(len(self ) , data )
    def insert_head( self , data ) -> None:
        '''simple docstring'''
        self.insert_nth(0 , data )
    def insert_nth( self , index , data ) -> None:
        '''simple docstring'''
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self ) -> None:  # print every node data
        '''simple docstring'''
        print(self )
    def delete_head( self ) -> Any:
        '''simple docstring'''
        return self.delete_nth(0 )
    def delete_tail( self ) -> Any:  # delete from tail
        '''simple docstring'''
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index = 0 ) -> Any:
        '''simple docstring'''
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError('List index out of range.' )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self ) -> bool:
        '''simple docstring'''
        return self.head is None
    def reverse( self ) -> None:
        '''simple docstring'''
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    '''simple docstring'''
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
    for i in range(0 , 9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2() -> None:
    '''simple docstring'''
    test_input = [
        -9,
        100,
        Node(77345112 ),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10 ),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!' ) )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    '''simple docstring'''
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
    linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
    linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nDelete head' )
    linked_list.delete_head()
    print('Delete tail' )
    linked_list.delete_tail()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nReverse linked list' )
    linked_list.reverse()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nString representation of linked list:' )
    print(linked_list )
    print('\nReading/changing Node data using indexing:' )
    print(f"Element at Position 1: {linked_list[1]}" )
    linked_list[1] = input('Enter New Value: ' ).strip()
    print('New list:' )
    print(linked_list )
    print(f"length of linked_list is : {len(linked_list )}" )
if __name__ == "__main__":
main()
| 328 | 0 |
def combination_sum_iv( n: int , array: list[int] , target: int ) -> int:
    """simple docstring"""
    def count_of_possible_combinations(target: int ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array( n: int , array: list[int] , target: int ) -> int:
    """simple docstring"""
    def count_of_possible_combinations_with_dp_array(
        target: int , dp_array: list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up( n: int , array: list[int] , target: int ) -> int:
    """simple docstring"""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
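    # Sanity check (editor's addition): all three variants must agree; for array=[1, 2, 5]
    # and target=5 there are 9 ordered combinations (f(5) = f(4) + f(3) + f(0) = 5 + 3 + 1).
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )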
    print(combination_sum_iv(n, array, target))
| 312 |
import math
def perfect_square( num: int ) -> bool:
    '''simple docstring'''
    return math.sqrt(num ) * math.sqrt(num ) == num
def perfect_square_binary_search( n: int ) -> bool:
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
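# Quick checks (editor's addition): both implementations agree on a square and a non-square.
assert perfect_square(16 ) and perfect_square_binary_search(16 )
assert not perfect_square(15 ) and not perfect_square_binary_search(15 )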
if __name__ == "__main__":
import doctest
doctest.testmod()
| 328 | 0 |
def hex_to_bin( hex_num: str ) -> int:
    '''simple docstring'''
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function" )
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16 )
    except ValueError:
        raise ValueError("Invalid value was passed to the function" )
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
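    # Quick check (editor's addition): 0xFA is 0b11111010, returned as the integer 11111010.
    assert hex_to_bin("FA") == 11111010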
| 65 |
def generate_large_matrix() -> list[list[int]]:
    '''simple docstring'''
    return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid(grid: list[list[int]] ) -> None:
    '''simple docstring'''
    assert all(row == sorted(row , reverse=True ) for row in grid )
    assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def find_negative_index(array: list[int] ) -> int:
    '''simple docstring'''
    left = 0
    right = len(array ) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array )
def count_negatives_binary_search(grid: list[list[int]] ) -> int:
    '''simple docstring'''
    total = 0
    bound = len(grid[0] )
    for i in range(len(grid ) ):
        bound = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(grid ) * len(grid[0] )) - total
def count_negatives_brute_force(grid: list[list[int]] ) -> int:
    '''simple docstring'''
    return len([number for row in grid for number in row if number < 0] )
def count_negatives_brute_force_with_break(grid: list[list[int]] ) -> int:
    '''simple docstring'''
    total = 0
    for row in grid:
        for i, number in enumerate(row ):
            if number < 0:
                total += len(row ) - i
                break
    return total
def benchmark() -> None:
    '''simple docstring'''
    from timeit import timeit
    print('Running benchmarks' )
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)" , setup=setup , number=500 )
        print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
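    # Quick check (editor's addition): all three counting strategies agree on a small grid
    # taken from the test cases above.
    assert (
        count_negatives_binary_search([[3, 2], [1, 0]])
        == count_negatives_brute_force([[3, 2], [1, 0]])
        == count_negatives_brute_force_with_break([[3, 2], [1, 0]])
        == 0
    )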
| 328 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = "\\n\n"
_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_KWARGS_DESCRIPTION = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity ( datasets.Metric ):
    '''simple docstring'''
    def _info( self ):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'input_texts': datasets.Value('string' ),
                } ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
    def _compute( self , input_texts , model_id , batch_size: int = 16 , add_start_token: bool = True , device=None ):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = 'cuda'
        else:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        model = AutoModelForCausalLM.from_pretrained(model_id )
        model = model.to(device )
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts , add_special_tokens=False , padding=True , truncation=True , max_length=max_tokenized_len , return_tensors='pt' , return_attention_mask=True , ).to(device )
        encoded_texts = encodings['input_ids']
        attn_masks = encodings['attention_mask']
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction='none' )
        for start_index in logging.tqdm(range(0 , len(encoded_texts ) , batch_size ) ):
            end_index = min(start_index + batch_size , len(encoded_texts ) )
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(device )
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(device ), attn_mask] , dim=1 )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch , attention_mask=attn_mask ).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1 , 2 ) , shift_labels ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls )}
| 178 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase__ : str = logging.get_logger(__name__)
class PoolFormerImageProcessor( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ['pixel_values']
    def __init__( self , do_resize = True , size = None , crop_pct = 0.9 , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , rescale_factor = 1 / 255 , do_rescale = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image , size , crop_pct = None , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(F"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size['shortest_edge'] / crop_pct )
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size['height'] / crop_pct )
                else:
                    scale_size = (int(size['height'] / crop_pct ), int(size['width'] / crop_pct ))
            else:
                raise ValueError('Invalid size for resize: {}'.format(size ) )
            output_size = get_resize_output_image_size(image , size=scale_size , default_to_square=False )
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False )
            elif "height" in size and "width" in size:
                output_size = (size['height'], size['width'])
            else:
                raise ValueError('Invalid size for resize: {}'.format(size ) )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"size must contain 'height' and 'width' as keys. Got {size.keys()}" )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , )-> PIL.Image.Image:
'''simple docstring'''
__UpperCamelCase = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct
__UpperCamelCase = resample if resample is not None else self.resample
__UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase = image_mean if image_mean is not None else self.image_mean
__UpperCamelCase = image_std if image_std is not None else self.image_std
__UpperCamelCase = size if size is not None else self.size
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = crop_size if crop_size is not None else self.crop_size
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
__UpperCamelCase = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_pct is None:
raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
__UpperCamelCase = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , crop_pct=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
__UpperCamelCase = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
__UpperCamelCase = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
__UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
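# Illustrative check (not part of the processor above) of the crop_pct
# arithmetic used by resize(): the image is first resized to
# int(target / crop_pct) and only then center-cropped back to `target`,
# which reproduces the common 224 -> 256 -> 224 evaluation pipeline.
_target, _crop_pct = 224, 0.875
assert int(_target / _crop_pct) == 256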
| 328 | 0 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def __lowerCamelCase ( pkg , lowerCAmelCase_=None ) -> Union[str, Any]:
    require_version(deps[pkg] , lowerCAmelCase_ )
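# Usage sketch: in the upstream library this on-demand helper is exposed as
# `dep_version_check`; calling it with a key from `deps` raises if the
# installed package violates the pin (the package name below is illustrative).
#
#   from transformers.dependency_versions_check import dep_version_check
#   dep_version_check("numpy")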
| 89 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations ( examples : List[str] , out_file : str , model_name : str , batch_size : int = 8 , device : str = DEFAULT_DEVICE , fpaa : bool=False , task : str="summarization" , prefix : str=None , **generate_kwargs , ) -> Dict:
    '''simple docstring'''
    fout = Path(out_file ).open('''w''' , encoding='''utf-8''' )
    model_name = str(model_name )
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name ).to(device )
    if fpaa:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model , task )
    if prefix is None:
        prefix = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
    for examples_chunk in tqdm(list(chunks(examples , batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk , return_tensors='''pt''' , truncation=True , padding='''longest''' ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids , attention_mask=batch.attention_mask , **generate_kwargs , )
        dec = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + '''\n''' )
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time ) # seconds
    n_obs = len(examples )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def datetime_now ( ) -> str:
'''simple docstring'''
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def run_generate ( verbose : bool=True ) -> dict:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('''model_name''' , type=str , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''input_path''' , type=str , help='''like cnn_dm/test.source''' )
    parser.add_argument('''save_path''' , type=str , help='''where to save summaries''' )
    parser.add_argument('''--reference_path''' , type=str , required=False , help='''like cnn_dm/test.target''' )
    parser.add_argument('''--score_path''' , type=str , required=False , default='''metrics.json''' , help='''where to save metrics''' )
    parser.add_argument('''--device''' , type=str , required=False , default=DEFAULT_DEVICE , help='''cuda, cuda:1, cpu etc.''' )
    parser.add_argument(
        '''--prefix''' , type=str , required=False , default=None , help='''will be added to the beginning of src examples''' )
    parser.add_argument('''--task''' , type=str , default='''summarization''' , help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''' , type=int , default=8 , required=False , help='''batch size''' )
    parser.add_argument(
        '''--n_obs''' , type=int , default=-1 , required=False , help='''How many observations. Defaults to all.''' )
    parser.add_argument('''--fp16''' , action='''store_true''' )
    parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
    parser.add_argument(
        '''--info''' , nargs='''?''' , type=str , const=datetime_now() , help=(
            '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
            ''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
        ) , )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args , rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest )
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}" )
    examples = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=True )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c." )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
    runtime_metrics = generate_summaries_or_translations(
        examples , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fp16 , task=args.task , prefix=args.prefix , **parsed_args , )
if args.reference_path is None:
return {}
# Compute scores
    score_fn = calculate_bleu if '''translation''' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
    scores = score_fn(output_lns , reference_lns )
    scores.update(runtime_metrics )
    if args.dump_args:
        scores.update(parsed_args )
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores )
    if args.score_path is not None:
        json.dump(scores , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
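# Example invocation for summarization (paths and model are illustrative):
# python run_eval.py facebook/bart-large-cnn cnn_dm/test.source out/test_gen.txt \
#     --reference_path cnn_dm/test.target --score_path out/rouge.json --bs 16 --fp16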
| 328 | 0 |
lowerCamelCase_ = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
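# Illustrative lookup: the table maps a bare package name to its pip-style
# specifier, e.g. when assembling install_requires / extras in setup.py.
assert lowerCamelCase_["torch"] == "torch>=1.4"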
| 244 |
from math import factorial
def solution ( snake_case : int = 100 ) -> int:
    '''simple docstring'''
    return sum(int(x ) for x in str(factorial(snake_case ) ) )
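# Worked example: 10! = 3628800, whose digits sum to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert solution(10) == 27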
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 328 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ : str = logging.get_logger(__name__)
a_ : Tuple = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class snake_case ( BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
_lowerCamelCase = "convnextv2"
    def __init__( self , num_channels=3 , num_stages=4 , patch_size=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-12 , drop_path_rate=0.0 , image_size=224 , out_features=None , out_indices=None , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
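# Hedged usage sketch: with the defaults above, instantiating the config
# yields hidden_sizes [96, 192, 384, 768] and stage_names
# ["stem", "stage1", "stage2", "stage3", "stage4"], from which the aligned
# backbone out_features / out_indices are derived.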
| 55 |
def binary_insertion_sort ( collection : list ) -> list:
    '''simple docstring'''
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
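# Quick sanity check: the sort is in-place and returns the same list object.
assert binary_insertion_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]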
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 328 | 0 |
# flake8: noqa
# Lint as: python3
__A : Union[str, Any] = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 138 |
from __future__ import annotations
from collections import deque
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ )-> int:
'''simple docstring'''
__UpperCamelCase = []
self.adlist.append(
{'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} )
for keyword in keywords:
self.add_keyword(SCREAMING_SNAKE_CASE_ )
self.set_fail_transitions()
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> int | None:
'''simple docstring'''
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> None:
'''simple docstring'''
__UpperCamelCase = 0
for character in keyword:
__UpperCamelCase = self.find_next_state(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if next_state is None:
self.adlist.append(
{
'''value''': character,
'''next_states''': [],
'''fail_state''': 0,
'''output''': [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
__UpperCamelCase = len(self.adlist ) - 1
else:
__UpperCamelCase = next_state
self.adlist[current_state]["output"].append(SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> None:
'''simple docstring'''
__UpperCamelCase = deque()
for node in self.adlist[0]["next_states"]:
q.append(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = 0
while q:
__UpperCamelCase = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self.adlist[r]['''fail_state''']
while (
self.find_next_state(SCREAMING_SNAKE_CASE_ , self.adlist[child]['''value'''] ) is None
and state != 0
):
__UpperCamelCase = self.adlist[state]['''fail_state''']
__UpperCamelCase = self.find_next_state(
SCREAMING_SNAKE_CASE_ , self.adlist[child]['''value'''] )
if self.adlist[child]["fail_state"] is None:
__UpperCamelCase = 0
__UpperCamelCase = (
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> dict[str, list[int]]:
'''simple docstring'''
__UpperCamelCase = {} # returns a dict with keywords and list of its occurrences
__UpperCamelCase = 0
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
while (
self.find_next_state(SCREAMING_SNAKE_CASE_ , string[i] ) is None
and current_state != 0
):
__UpperCamelCase = self.adlist[current_state]['''fail_state''']
__UpperCamelCase = self.find_next_state(SCREAMING_SNAKE_CASE_ , string[i] )
if next_state is None:
__UpperCamelCase = 0
else:
__UpperCamelCase = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
__UpperCamelCase = []
result[key].append(i - len(SCREAMING_SNAKE_CASE_ ) + 1 )
return result
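# Usage sketch (names as obfuscated above): building the automaton over
# ["he", "she"] and searching "ushers" yields {"she": [1], "he": [2]},
# i.e. each keyword mapped to the start indices of its occurrences.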
if __name__ == "__main__":
import doctest
doctest.testmod()
| 328 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCAmelCase ( TestCasePlus ):
@slow
@require_torch
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
__lowerCamelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
__lowerCamelCase = bertabert.config.encoder.vocab_size
__lowerCamelCase = tokenizer.sep_token_id
__lowerCamelCase = tokenizer.cls_token_id
__lowerCamelCase = 128
__lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
__lowerCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
__lowerCamelCase = train_dataset.select(range(32 ) )
__lowerCamelCase = val_dataset.select(range(16 ) )
__lowerCamelCase = 4
def _map_to_encoder_decoder_inputs(__UpperCAmelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
__lowerCamelCase = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=SCREAMING_SNAKE_CASE_ , max_length=512 )
__lowerCamelCase = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=SCREAMING_SNAKE_CASE_ , max_length=128 )
__lowerCamelCase = inputs.input_ids
__lowerCamelCase = inputs.attention_mask
__lowerCamelCase = outputs.input_ids
__lowerCamelCase = outputs.input_ids.copy()
__lowerCamelCase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
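            # The -100 substitution above matters because
            # torch.nn.CrossEntropyLoss defaults to ignore_index=-100,
            # so padded label positions contribute nothing to the loss.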
__lowerCamelCase = outputs.attention_mask
assert all(len(SCREAMING_SNAKE_CASE_ ) == 512 for x in inputs.input_ids )
assert all(len(SCREAMING_SNAKE_CASE_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(__UpperCAmelCase ):
__lowerCamelCase = pred.label_ids
__lowerCamelCase = pred.predictions
# all unnecessary tokens are removed
__lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(SCREAMING_SNAKE_CASE_ ) )] ) / len(SCREAMING_SNAKE_CASE_ )
return {"accuracy": accuracy}
# map train dataset
__lowerCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
__lowerCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
__lowerCamelCase = self.get_auto_remove_tmp_dir()
__lowerCamelCase = SeqaSeqTrainingArguments(
output_dir=SCREAMING_SNAKE_CASE_ , per_device_train_batch_size=SCREAMING_SNAKE_CASE_ , per_device_eval_batch_size=SCREAMING_SNAKE_CASE_ , predict_with_generate=SCREAMING_SNAKE_CASE_ , evaluation_strategy='''steps''' , do_train=SCREAMING_SNAKE_CASE_ , do_eval=SCREAMING_SNAKE_CASE_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
__lowerCamelCase = SeqaSeqTrainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , compute_metrics=_compute_metrics , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , )
# start training
trainer.train()
| 330 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class SCREAMING_SNAKE_CASE__ ( object ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , )-> Dict:
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = num_labels
__UpperCamelCase = num_choices
__UpperCamelCase = scope
def A__ ( self )-> List[str]:
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self )-> str:
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Any:
'''simple docstring'''
__UpperCamelCase = DistilBertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = DistilBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Tuple:
'''simple docstring'''
__UpperCamelCase = DistilBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = DistilBertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str:
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = DistilBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str:
'''simple docstring'''
__UpperCamelCase = self.num_choices
__UpperCamelCase = DistilBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs
__UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_snake_case = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
_snake_case = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case = True
_snake_case = True
_snake_case = True
_snake_case = True
def A__ ( self )-> Dict:
'''simple docstring'''
__UpperCamelCase = DistilBertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , dim=37 )
def A__ ( self )-> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> int:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Any:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
@slow
def A__ ( self )-> List[str]:
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = DistilBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@slow
@require_torch_gpu
def A__ ( self )-> List[str]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__UpperCamelCase = True
__UpperCamelCase = model_class(config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = torch.jit.trace(
SCREAMING_SNAKE_CASE_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) )
__UpperCamelCase = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) , map_location=SCREAMING_SNAKE_CASE_ )
loaded(inputs_dict['''input_ids'''].to(SCREAMING_SNAKE_CASE_ ) , inputs_dict['''attention_mask'''].to(SCREAMING_SNAKE_CASE_ ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
__UpperCamelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = torch.tensor(
[[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
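# To run just this module's tests (file path is illustrative):
#   python -m pytest tests/models/distilbert/test_modeling_distilbert.py -k "masked_lm"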
| 328 | 0 |
"""simple docstring"""
def solution ( limit = 1000000 ):
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
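# Sanity check against the Project Euler 72 example: for denominators up to 8
# there are 21 reduced proper fractions, i.e. sum of phi(2..8) = 21.
assert solution(8) == 21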
if __name__ == "__main__":
print(f'''{solution() = }''')
| 78 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowercase__ : Optional[Any] = logging.getLogger(__name__)
def A_ ( snake_case : Any=2 , snake_case : Union[str, Any]=3 , snake_case : Union[str, Any]=16 , snake_case : int = 10 , snake_case : int = 2 ) -> int:
'''simple docstring'''
def get_dataset(snake_case : Optional[int] ):
__UpperCamelCase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
__UpperCamelCase = get_dataset(snake_case )
__UpperCamelCase = get_dataset(snake_case )
__UpperCamelCase = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 )
__UpperCamelCase = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 )
return (train_dataloader, valid_dataloader)
def A_ ( snake_case : List[str] , snake_case : int , snake_case : List[str] , snake_case : Optional[int] , snake_case : int , snake_case : str=None ) -> Any:
'''simple docstring'''
__UpperCamelCase = []
for epoch in range(snake_case ):
# Train quickly
model.train()
for batch in dataloader:
__UpperCamelCase , __UpperCamelCase = batch
__UpperCamelCase = model(snake_case )
__UpperCamelCase = torch.nn.functional.mse_loss(snake_case , snake_case )
accelerator.backward(snake_case )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self )-> Tuple:
'''simple docstring'''
super().__init__()
__UpperCamelCase = nn.Parameter(torch.randn(1 ) )
__UpperCamelCase = nn.Parameter(torch.randn(1 ) )
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict:
'''simple docstring'''
return x * self.a + self.b
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self )-> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def A__ ( self )-> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
# Train baseline
__UpperCamelCase = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
__UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
__UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = torch.tensor([1, 2, 3] )
__UpperCamelCase = torch.tensor([2, 3, 4] )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(net.parameters() )
__UpperCamelCase = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.9_9 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
__UpperCamelCase = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def A__ ( self )-> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
lowercase__ : Optional[int] = "/tmp/accelerate/state_checkpointing"
lowercase__ : List[Any] = DummyModel()
lowercase__ : Tuple = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowercase__ : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowercase__ , lowercase__ : str = dummy_dataloaders()
lowercase__ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowercase__ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowercase__ , lowercase__ : str = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowercase__ : int = group["params"][0].device
break
assert param_device.type == accelerator.device.type
lowercase__ : Union[str, Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
lowercase__ : Any = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
lowercase__ : List[Any] = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
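# Note: the __main__ block above doubles as the multi-GPU target -- the
# @require_cuda test re-launches this same file via
#   torchrun --nproc_per_node=<num_gpus> <this_file>.py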
| 328 | 0 |
ROMAN = [
(1_000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def roman_to_int (roman : str ) -> int:
    vals = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    total = 0
    place = 0
    while place < len(roman ):
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman (number : int ) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 277 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SCREAMING_SNAKE_CASE__ ( AbstractDatasetInputStream ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , **SCREAMING_SNAKE_CASE_ , )-> Optional[int]:
'''simple docstring'''
super().__init__(features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = Sql(
cache_dir=SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , sql=SCREAMING_SNAKE_CASE_ , con=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def A__ ( self )-> Any:
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE_ , download_mode=SCREAMING_SNAKE_CASE_ , verification_mode=SCREAMING_SNAKE_CASE_ , base_path=SCREAMING_SNAKE_CASE_ , )
# Build dataset for splits
__UpperCamelCase = self.builder.as_dataset(
split='''train''' , verification_mode=SCREAMING_SNAKE_CASE_ , in_memory=self.keep_in_memory )
return dataset
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> List[str]:
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(F"num_proc {num_proc} must be an integer > 0." )
__UpperCamelCase = dataset
__UpperCamelCase = name
__UpperCamelCase = con
__UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__UpperCamelCase = num_proc
__UpperCamelCase = to_sql_kwargs
def A__ ( self )-> int:
'''simple docstring'''
__UpperCamelCase = self.to_sql_kwargs.pop('''sql''' , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self.to_sql_kwargs.pop('''con''' , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self.to_sql_kwargs.pop('''index''' , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self._write(index=SCREAMING_SNAKE_CASE_ , **self.to_sql_kwargs )
return written
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = args
__UpperCamelCase = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
__UpperCamelCase = query_table(
table=self.dataset.data , key=slice(SCREAMING_SNAKE_CASE_ , offset + self.batch_size ) , indices=self.dataset._indices , )
__UpperCamelCase = batch.to_pandas()
__UpperCamelCase = df.to_sql(self.name , self.con , index=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return num_rows or len(SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> int:
'''simple docstring'''
__UpperCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__UpperCamelCase , __UpperCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
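# Hedged usage sketch mirroring the public `datasets` API built on these
# classes (the sqlite URI and table names are illustrative):
#
#   from datasets import Dataset
#   ds = Dataset.from_sql("SELECT * FROM items", "sqlite:///data.db")
#   ds.to_sql("items_copy", "sqlite:///data.db", batch_size=1000)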
| 328 | 0 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze ( module ) -> None:
    # disable gradient updates for every parameter of the module
    for param in module.parameters():
        param.requires_grad = False
def get_device ( ) -> str:
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = 'mps'
    if device == "mps":
        print(
            'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
            ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
            ' with generations.' )
    return device
def show_image ( img ) -> None:
    # render a PIL image without axis ticks
    fig = plt.imshow(img )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp ( ) -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime('%H:%M:%S' )
    return timestamp
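# Minimal usage sketch tying the helpers together (assumes some torch model):
#
#   device = get_device()   # "cuda", "mps", or "cpu"
#   freeze(model)           # disable gradients on every parameter
#   print(get_timestamp())  # e.g. "14:03:27"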
| 99 |
def A_ ( snake_case : str ) -> int:
    '''simple docstring'''
    assert snake_case.isupper()
    answer = 0
    index = len(snake_case ) - 1
    power = 0
    while index >= 0:
        value = (ord(snake_case[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
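# Worked example: "AB" -> (ord("A") - 64) * 26 + (ord("B") - 64) = 26 + 2 = 28.
assert A_("AB") == 28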
if __name__ == "__main__":
from doctest import testmod
testmod()
| 328 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Any=7 , UpperCAmelCase : Any=3 , UpperCAmelCase : str=18 , UpperCAmelCase : Tuple=30 , UpperCAmelCase : Union[str, Any]=400 , UpperCAmelCase : Any=True , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Optional[Any]=True , ):
A_ = size if size is not None else {"height": 18, "width": 18}
A_ = parent
A_ = batch_size
A_ = num_channels
A_ = image_size
A_ = min_resolution
A_ = max_resolution
A_ = do_resize
A_ = size
A_ = apply_ocr
def __A ( self : List[str] ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _a ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def __A ( self : List[str] ):
A_ = LayoutLMvaImageProcessingTester(self )
@property
def __A ( self : int ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Optional[Any] ):
A_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "do_resize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "size" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "apply_ocr" ) )
def __A ( self : Optional[int] ):
A_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def __A ( self : List[str] ):
pass
def __A ( self : List[Any] ):
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(encoding.boxes , SCREAMING_SNAKE_CASE_ )
# Test batched
A_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __A ( self : Union[str, Any] ):
A_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
A_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __A ( self : List[Any] ):
A_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
# Test not batched input
A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
A_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def __A ( self : Union[str, Any] ):
A_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
A_ = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
A_ = Image.open(ds[0]["file"] ).convert("RGB" )
A_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A_ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
A_ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 
801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(encoding.boxes , SCREAMING_SNAKE_CASE_ )
# with apply_OCR = False
A_ = LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE_ )
A_ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) | 312 |
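# --- Added usage sketch (not part of the original test file) ---
# A minimal, hedged example of how the processor exercised above is used on
# its own. It assumes pytesseract is installed and that "document.png" is a
# local image (a placeholder path). With apply_ocr=True (the default) the
# processor returns OCR words and boxes alongside the resized pixel values.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutLMvaImageProcessor

    processor = LayoutLMvaImageProcessor()
    encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
    print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224]) by default
    print(encoding.words[0][:5], encoding.boxes[0][:5])  # first few OCR results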
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for a given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle, one row at a time, via populate_current_row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build the triangle row by row, computing only the first half of each row
    and mirroring it, since every row of Pascal's triangle is symmetric."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Time both triangle generators for a range of input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 328 | 0 |
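# --- Added sanity check (not part of the original file) ---
# Every entry of Pascal's triangle equals the binomial coefficient C(row, col),
# so both generators above can be verified against math.comb from the standard
# library (Python 3.8+).
if __name__ == "__main__":
    import math

    rows = generate_pascal_triangle(6)
    assert rows == generate_pascal_triangle_optimized(6)
    for r, row in enumerate(rows):
        assert row == [math.comb(r, c) for c in range(r + 1)]
    print("both generators agree with math.comb")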
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
UpperCamelCase__ = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCAmelCase_ ( __A ) -> int:
'''simple docstring'''
if isinstance(__A, torch.Tensor ):
return image
elif isinstance(__A, PIL.Image.Image ):
UpperCAmelCase__ = [image]
UpperCAmelCase__ = [trans(img.convert("RGB" ) ) for img in image]
UpperCAmelCase__ = torch.stack(__A )
return image
class A ( SCREAMING_SNAKE_CASE_ ):
def __init__(self : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any ) -> str:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCAmelCase__ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ )
def lowercase_ (self : List[str] , __UpperCAmelCase : str ) -> Any:
"""simple docstring"""
if strength < 0 or strength > 1:
raise ValueError(f"""The value of strength should in [0.0, 1.0] but is {strength}""" )
def lowercase_ (self : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase__ = min(int(num_inference_steps * strength ) , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase__ = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase__ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowercase_ (self : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str]=None ) -> Tuple:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(SCREAMING_SNAKE_CASE_ )}""" )
UpperCAmelCase__ = image.to(device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE_ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
UpperCAmelCase__ = init_latents.shape
UpperCAmelCase__ = randn_tensor(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )
# get latents
print("add noise to latents at timestep" , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase__ = self.scheduler.add_noise(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase__ = init_latents
return latents
@torch.no_grad()
def __call__(self : int , __UpperCAmelCase : Tuple = None , __UpperCAmelCase : int = 0.8 , __UpperCAmelCase : str = 1 , __UpperCAmelCase : str = None , __UpperCAmelCase : Any = 0.0 , __UpperCAmelCase : Dict = 5_0 , __UpperCAmelCase : str = None , __UpperCAmelCase : Any = "pil" , __UpperCAmelCase : Any = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
self.check_inputs(SCREAMING_SNAKE_CASE_ )
# 2. Preprocess image
UpperCAmelCase__ = preprocess(SCREAMING_SNAKE_CASE_ )
# 3. set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , device=self.device )
UpperCAmelCase__ , UpperCAmelCase__ = self.get_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.device )
UpperCAmelCase__ = timesteps[:1].repeat(SCREAMING_SNAKE_CASE_ )
# 4. Prepare latent variables
UpperCAmelCase__ = self.prepare_latents(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.unet.dtype , self.device , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase__ = latents
# 5. Denoising loop
for t in self.progress_bar(SCREAMING_SNAKE_CASE_ ):
# 1. predict noise model_output
UpperCAmelCase__ = self.unet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCAmelCase__ = self.scheduler.step(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , use_clipped_model_output=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , ).prev_sample
UpperCAmelCase__ = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase__ = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
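# --- Added usage sketch (not part of the original file) ---
# A hedged example of driving the image-variation pipeline defined above. It
# assumes a saved checkpoint bundling a trained UNet and scheduler;
# "path/to/checkpoint" and "input.png" are placeholder assumptions, and the
# keyword names assume the un-mangled signature (image, strength,
# num_inference_steps).
if __name__ == "__main__":
    import PIL.Image

    pipe = DiffusionPipeline.from_pretrained("path/to/checkpoint")
    result = pipe(image=PIL.Image.open("input.png"), strength=0.8, num_inference_steps=50)
    result.images[0].save("variation.png")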
| 65 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 328 | 0 |
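# --- Added usage note (not part of the original script) ---
# After running the conversion, e.g.
#     python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variation
# (script and directory names are placeholder assumptions), the converted
# pipeline can be reloaded directly:
#
#     from diffusers import UnCLIPImageVariationPipeline
#     pipe = UnCLIPImageVariationPipeline.from_pretrained("./karlo-image-variation")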
encode_dict = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encode a message (letters of the alphabet and spaces) with the Baconian cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def decode(coded: str) -> str:
    """Decode a Baconian-cipher string made up of 'A', 'B' and spaces."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
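# --- Added round-trip example (not part of the original file) ---
# Encoding and then decoding should recover the original message; the space
# character maps to itself, which is what lets decode() split on whitespace.
if __name__ == "__main__":
    message = "hello world"
    ciphertext = encode(message)
    assert decode(ciphertext) == message
    print(message, "->", ciphertext)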
| 178 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowercase__ : Union[str, Any] = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
lowercase__ : Optional[Any] = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
lowercase__ : Any = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
lowercase__ : Optional[int] = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
lowercase__ : Optional[Any] = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
def A__ ( self )-> Tuple:
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=[1, 10, 100] , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=3.0 )-> Union[str, Any]:
'''simple docstring'''
if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('''This metric is currently not supported on Windows.''' )
with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE_ ) as executor:
__UpperCamelCase = []
__UpperCamelCase = Counter()
__UpperCamelCase = 0
__UpperCamelCase = defaultdict(SCREAMING_SNAKE_CASE_ )
for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ):
for candidate in candidates:
__UpperCamelCase = candidate + '''\n''' + test_case
__UpperCamelCase = (test_program, timeout, task_id, completion_id[task_id])
__UpperCamelCase = executor.submit(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
futures.append(SCREAMING_SNAKE_CASE_ )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase = future.result()
results[result["task_id"]].append((result['''completion_id'''], result) )
__UpperCamelCase , __UpperCamelCase = [], []
for result in results.values():
result.sort()
__UpperCamelCase = [r[1]['''passed'''] for r in result]
total.append(len(SCREAMING_SNAKE_CASE_ ) )
correct.append(sum(SCREAMING_SNAKE_CASE_ ) )
__UpperCamelCase = np.array(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = np.array(SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = k
__UpperCamelCase = {F"pass@{k}": estimate_pass_at_k(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def A_ ( snake_case : Tuple , snake_case : Union[str, Any] , snake_case : List[Any] ) -> Optional[Any]:
'''simple docstring'''
def estimator(snake_case : int , snake_case : int , snake_case : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(snake_case , snake_case ):
__UpperCamelCase = itertools.repeat(snake_case , len(snake_case ) )
else:
assert len(snake_case ) == len(snake_case )
__UpperCamelCase = iter(snake_case )
return np.array([estimator(int(snake_case ) , int(snake_case ) , snake_case ) for n, c in zip(snake_case , snake_case )] )
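# --- Added worked example (not part of the original file) ---
# For n samples of which c are correct, pass@k = 1 - prod_{i=n-c+1}^{n} (1 - k/i).
# With n=5, c=2, k=1 this gives 1 - (1 - 1/4)(1 - 1/5) = 0.4, matching the
# naive rate c/n for k=1 (using the estimator referenced above as
# estimate_pass_at_k).
if __name__ == "__main__":
    print(estimate_pass_at_k([5], [2], 1))  # -> [0.4]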
| 328 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self : Union[str, Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : int=2 ,_UpperCAmelCase : str=True ,_UpperCAmelCase : Optional[Any]=False ,_UpperCAmelCase : Any=10 ,_UpperCAmelCase : int=3 ,_UpperCAmelCase : List[Any]=32 * 4 ,_UpperCAmelCase : List[Any]=32 * 6 ,_UpperCAmelCase : Tuple=4 ,_UpperCAmelCase : Union[str, Any]=32 ,):
_a : Dict = parent
_a : Any = batch_size
_a : int = is_training
_a : List[str] = use_auxiliary_loss
_a : Tuple = num_queries
_a : List[Any] = num_channels
_a : Union[str, Any] = min_size
_a : Tuple = max_size
_a : Dict = num_labels
_a : List[str] = mask_feature_size
def __lowercase ( self : str ):
_a : Any = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
SCREAMING_SNAKE_CASE_ )
_a : Dict = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=SCREAMING_SNAKE_CASE_ )
_a : Any = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=SCREAMING_SNAKE_CASE_ ) > 0.5
).float()
_a : Any = (torch.rand((self.batch_size, self.num_labels) ,device=SCREAMING_SNAKE_CASE_ ) > 0.5).long()
_a : Optional[Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __lowercase ( self : Optional[Any] ):
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig(
decoder_ffn_dim=128 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,)
def __lowercase ( self : List[str] ):
_a , _a , _a , _a , _a : str = self.prepare_config_and_inputs()
_a : Dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ):
_a : Optional[Any] = output.encoder_hidden_states
_a : Tuple = output.pixel_decoder_hidden_states
_a : int = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) ,config.decoder_config.decoder_layers )
def __lowercase ( self : Any ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple=False ):
with torch.no_grad():
_a : Dict = MaskFormerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
_a : Optional[int] = model(pixel_values=SCREAMING_SNAKE_CASE_ ,pixel_mask=SCREAMING_SNAKE_CASE_ )
_a : Dict = model(SCREAMING_SNAKE_CASE_ ,output_hidden_states=SCREAMING_SNAKE_CASE_ )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.mask_feature_size) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def __lowercase ( self : Tuple ,_UpperCAmelCase : Dict ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Any ):
_a : Optional[Any] = MaskFormerForInstanceSegmentation(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
def comm_check_on_output(_UpperCAmelCase : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_a : int = model(pixel_values=SCREAMING_SNAKE_CASE_ ,pixel_mask=SCREAMING_SNAKE_CASE_ )
_a : Any = model(SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
_a : Dict = model(
pixel_values=SCREAMING_SNAKE_CASE_ ,pixel_mask=SCREAMING_SNAKE_CASE_ ,mask_labels=SCREAMING_SNAKE_CASE_ ,class_labels=SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class __magic_name__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
lowerCAmelCase : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
lowerCAmelCase : Dict = (
{'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = False
lowerCAmelCase : List[str] = False
lowerCAmelCase : Tuple = False
def __lowercase ( self : str ):
_a : Any = MaskFormerModelTester(self )
_a : List[Any] = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE_ ,has_text_modality=SCREAMING_SNAKE_CASE_ )
def __lowercase ( self : Any ):
self.config_tester.run_common_tests()
def __lowercase ( self : Optional[int] ):
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ,output_hidden_states=SCREAMING_SNAKE_CASE_ )
def __lowercase ( self : List[str] ):
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason='MaskFormer does not use inputs_embeds' )
def __lowercase ( self : Tuple ):
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method' )
def __lowercase ( self : Union[str, Any] ):
pass
@unittest.skip(reason='MaskFormer is not a generative model' )
def __lowercase ( self : List[Any] ):
pass
@unittest.skip(reason='MaskFormer does not use token embeddings' )
def __lowercase ( self : Dict ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __lowercase ( self : Optional[Any] ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : List[str] ):
pass
def __lowercase ( self : Tuple ):
_a , _a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Tuple = model_class(SCREAMING_SNAKE_CASE_ )
_a : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : int = [*signature.parameters.keys()]
_a : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE_ )
@slow
def __lowercase ( self : Optional[Any] ):
for model_name in ["facebook/maskformer-swin-small-coco"]:
_a : Optional[Any] = MaskFormerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __lowercase ( self : Any ):
_a : Any = (self.model_tester.min_size,) * 2
_a : Optional[int] = {
'pixel_values': torch.randn((2, 3, *size) ,device=SCREAMING_SNAKE_CASE_ ),
'mask_labels': torch.randn((2, 10, *size) ,device=SCREAMING_SNAKE_CASE_ ),
'class_labels': torch.zeros(2 ,10 ,device=SCREAMING_SNAKE_CASE_ ).long(),
}
_a : str = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(SCREAMING_SNAKE_CASE_ )
_a : List[Any] = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
def __lowercase ( self : List[Any] ):
_a , _a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ,output_hidden_states=SCREAMING_SNAKE_CASE_ )
def __lowercase ( self : Union[str, Any] ):
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[Any] = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
_a : Dict = model(**SCREAMING_SNAKE_CASE_ ,output_attentions=SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.attentions is not None )
def __lowercase ( self : Union[str, Any] ):
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_a : int = self.all_model_classes[1]
_a , _a , _a , _a , _a : Any = self.model_tester.prepare_config_and_inputs()
_a : Any = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
_a : List[Any] = model(SCREAMING_SNAKE_CASE_ ,mask_labels=SCREAMING_SNAKE_CASE_ ,class_labels=SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def __lowercase ( self : int ):
_a : Optional[int] = self.all_model_classes[1]
_a , _a , _a , _a , _a : str = self.model_tester.prepare_config_and_inputs()
_a : int = True
_a : Any = True
_a : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
_a : List[Any] = model(SCREAMING_SNAKE_CASE_ ,mask_labels=SCREAMING_SNAKE_CASE_ ,class_labels=SCREAMING_SNAKE_CASE_ )
_a : Optional[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_a : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
_a : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_a : List[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowerCAmelCase = 1e-4
def __lowerCamelCase ( ) -> List[Any]:
_a : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class __magic_name__ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : Optional[int] ):
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco' )
if is_vision_available()
else None
)
def __lowercase ( self : Optional[Any] ):
_a : List[str] = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco' ).to(SCREAMING_SNAKE_CASE_ )
_a : List[str] = self.default_image_processor
_a : Dict = prepare_img()
_a : str = image_processor(SCREAMING_SNAKE_CASE_ ,return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
_a : List[Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_ ,(1, 3, 800, 1088) )
with torch.no_grad():
_a : Tuple = model(**SCREAMING_SNAKE_CASE_ )
_a : Union[str, Any] = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,SCREAMING_SNAKE_CASE_ ,atol=SCREAMING_SNAKE_CASE_ ) )
_a : Optional[Any] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,SCREAMING_SNAKE_CASE_ ,atol=SCREAMING_SNAKE_CASE_ ) )
_a : Optional[int] = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,SCREAMING_SNAKE_CASE_ ,atol=SCREAMING_SNAKE_CASE_ ) )
def __lowercase ( self : int ):
_a : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(SCREAMING_SNAKE_CASE_ )
.eval()
)
_a : Union[str, Any] = self.default_image_processor
_a : Union[str, Any] = prepare_img()
_a : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE_ ,return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
_a : str = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_ ,(1, 3, 800, 1088) )
with torch.no_grad():
_a : List[Any] = model(**SCREAMING_SNAKE_CASE_ )
# masks_queries_logits
_a : Any = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
_a : Union[str, Any] = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
_a : Any = torch.tensor(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,SCREAMING_SNAKE_CASE_ ,atol=SCREAMING_SNAKE_CASE_ ) )
# class_queries_logits
_a : List[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_a : Dict = torch.tensor(
[
[1.65_12E00, -5.25_72E00, -3.35_19E00],
[3.61_69E-02, -5.90_25E00, -2.93_13E00],
[1.07_66E-04, -7.76_30E00, -5.12_63E00],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,SCREAMING_SNAKE_CASE_ ,atol=SCREAMING_SNAKE_CASE_ ) )
def __lowercase ( self : Dict ):
_a : Union[str, Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff' )
.to(SCREAMING_SNAKE_CASE_ )
.eval()
)
_a : Any = self.default_image_processor
_a : Optional[int] = prepare_img()
_a : Optional[Any] = image_processor(SCREAMING_SNAKE_CASE_ ,return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ )
_a : int = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_ ,(1, 3, 800, 1088) )
with torch.no_grad():
_a : Optional[Any] = model(**SCREAMING_SNAKE_CASE_ )
# masks_queries_logits
_a : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
_a : Optional[Any] = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.7711]]
_a : Union[str, Any] = torch.tensor(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,SCREAMING_SNAKE_CASE_ ,atol=SCREAMING_SNAKE_CASE_ ) )
# class_queries_logits
_a : List[str] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_a : Dict = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,SCREAMING_SNAKE_CASE_ ,atol=SCREAMING_SNAKE_CASE_ ) )
def __lowercase ( self : str ):
_a : int = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco' )
.to(SCREAMING_SNAKE_CASE_ )
.eval()
)
_a : Any = self.default_image_processor
_a : Optional[Any] = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
_a : str = inputs['pixel_values'].to(SCREAMING_SNAKE_CASE_ )
_a : Dict = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs['mask_labels']]
_a : Optional[Any] = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs['class_labels']]
with torch.no_grad():
_a : str = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
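# --- Added inference sketch (not part of the original test file) ---
# A hedged end-to-end example with the same checkpoint the integration tests
# use: run one image through the model, then let the image processor turn the
# raw query logits into a per-pixel semantic map.
if __name__ == "__main__":
    from PIL import Image
    from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

    processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
    model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    semantic_map = processor.post_process_semantic_segmentation(
        outputs, target_sizes=[image.size[::-1]]
    )[0]
    print(semantic_map.shape)  # (height, width) tensor of predicted class ids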
| 89 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
lowercase__ : Optional[int] = datasets.utils.logging.get_logger(__name__)
lowercase__ : Optional[Any] = ["names", "prefix"]
lowercase__ : List[Any] = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
lowercase__ : Optional[Any] = ["encoding_errors", "on_bad_lines"]
lowercase__ : List[str] = ["date_format"]
@dataclass
class SCREAMING_SNAKE_CASE__ ( datasets.BuilderConfig ):
"""simple docstring"""
_snake_case = ","
_snake_case = None
_snake_case = "infer"
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = True
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = False
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = True
_snake_case = True
_snake_case = False
_snake_case = True
_snake_case = None
_snake_case = "."
_snake_case = None
_snake_case = '"'
_snake_case = 0
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = True
_snake_case = True
_snake_case = 0
_snake_case = True
_snake_case = False
_snake_case = None
_snake_case = 10000
_snake_case = None
_snake_case = "strict"
_snake_case = "error"
_snake_case = None
def A__ ( self )-> Any:
'''simple docstring'''
if self.delimiter is not None:
__UpperCamelCase = self.delimiter
if self.column_names is not None:
__UpperCamelCase = self.column_names
@property
def A__ ( self )-> Any:
'''simple docstring'''
__UpperCamelCase = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , SCREAMING_SNAKE_CASE_ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class SCREAMING_SNAKE_CASE__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
_snake_case = CsvConfig
def A__ ( self )-> Any:
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Optional[int]:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
__UpperCamelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(SCREAMING_SNAKE_CASE_ , (str, list, tuple) ):
__UpperCamelCase = data_files
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase = [files]
__UpperCamelCase = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__UpperCamelCase = []
for split_name, files in data_files.items():
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase = [files]
__UpperCamelCase = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) for file in files]
splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE_ , gen_kwargs={'''files''': files} ) )
return splits
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> pa.Table:
'''simple docstring'''
if self.config.features is not None:
__UpperCamelCase = self.config.features.arrow_schema
if all(not require_storage_cast(SCREAMING_SNAKE_CASE_ ) for feature in self.config.features.values() ):
# cheaper cast
__UpperCamelCase = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=SCREAMING_SNAKE_CASE_ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
__UpperCamelCase = table_cast(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return pa_table
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> str:
'''simple docstring'''
__UpperCamelCase = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
__UpperCamelCase = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(SCREAMING_SNAKE_CASE_ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE_ ) ):
__UpperCamelCase = pd.read_csv(SCREAMING_SNAKE_CASE_ , iterator=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(SCREAMING_SNAKE_CASE_ ):
__UpperCamelCase = pa.Table.from_pandas(SCREAMING_SNAKE_CASE_ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE_ )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(SCREAMING_SNAKE_CASE_ )}: {e}" )
raise
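# --- Added usage sketch (not part of the original builder) ---
# This builder backs datasets' "csv" loader; any CsvConfig field above can be
# passed straight through load_dataset as a keyword argument. "data.csv" is a
# placeholder path.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("csv", data_files="data.csv", sep=";", skiprows=1, split="train")
    print(ds.features)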
| 328 | 0 |
def solution(n: int = 100) -> int:
    """Count the distinct terms a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
| 244 |
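# --- Added sanity check (not part of the original file) ---
# Project Euler 29's statement gives 15 distinct terms for 2 <= a, b <= 5
# (since 2**4 == 4**2), which the brute-force set approach reproduces.
if __name__ == "__main__":
    assert solution(5) == 15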
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composite numbers that cannot be written as the
    sum of a prime and twice a square (Goldbach's other conjecture)."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be greater than 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])

        if len(list_nums) == n:
            return list_nums

    return []


def solution() -> int:
    """Return the smallest odd composite that cannot be written as prime + 2*square."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
| 328 | 0 |
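# --- Added sanity checks (not part of the original file) ---
# The 6k +/- 1 primality test and the precomputed odd-composite list behave as
# expected on small inputs before the full Goldbach search runs.
if __name__ == "__main__":
    assert is_prime(2) and is_prime(97) and not is_prime(9)
    assert odd_composites[:3] == [9, 15, 21]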
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
a_ : int = pytest.mark.integration
a_ : Dict = {"comet"}
a_ : Optional[Any] = importlib.util.find_spec("""fairseq""") is not None
a_ : str = {"code_eval"}
a_ : int = os.name == "nt"
a_ : int = {"bertscore", "frugalscore", "perplexity"}
a_ : Optional[int] = importlib.util.find_spec("""transformers""") is not None
def __snake_case ( UpperCAmelCase_ : int ):
@wraps(UpperCAmelCase_ )
def wrapper(self : Dict , UpperCAmelCase_ : Any ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , UpperCAmelCase_ )
return wrapper
def __snake_case ( UpperCAmelCase_ : Tuple ):
@wraps(UpperCAmelCase_ )
def wrapper(self : Optional[int] , UpperCAmelCase_ : Dict ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , UpperCAmelCase_ )
return wrapper
def __snake_case ( UpperCAmelCase_ : List[Any] ):
@wraps(UpperCAmelCase_ )
def wrapper(self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , UpperCAmelCase_ )
return wrapper
def __snake_case ( ):
lowerCamelCase_ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@local
class snake_case ( parameterized.TestCase ):
"""simple docstring"""
_lowerCamelCase = {}
_lowerCamelCase = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = "[...]"
lowerCamelCase_ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , SCREAMING_SNAKE_CASE_ ) ).module_path )
lowerCamelCase_ = datasets.load.import_main_class(metric_module.__name__ , dataset=SCREAMING_SNAKE_CASE_ )
# check parameters
lowerCamelCase_ = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(SCREAMING_SNAKE_CASE_ , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowerCamelCase_ = doctest.testmod(SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , raise_on_error=SCREAMING_SNAKE_CASE_ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = "[...]"
lowerCamelCase_ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , SCREAMING_SNAKE_CASE_ ) ).module_path )
# run doctest
with self.use_local_metrics():
lowerCamelCase_ = doctest.testmod(SCREAMING_SNAKE_CASE_ , verbose=SCREAMING_SNAKE_CASE_ , raise_on_error=SCREAMING_SNAKE_CASE_ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def snake_case ( self , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](SCREAMING_SNAKE_CASE_ ):
yield
else:
yield
@contextmanager
def snake_case ( self ):
"""simple docstring"""
def load_local_metric(UpperCamelCase , *UpperCamelCase , **UpperCamelCase ):
return load_metric(os.path.join("metrics" , SCREAMING_SNAKE_CASE_ ) , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
with patch("datasets.load_metric" ) as mock_load_metric:
lowerCamelCase_ = load_local_metric
yield
@classmethod
def snake_case ( cls , UpperCamelCase ):
"""simple docstring"""
def wrapper(UpperCamelCase ):
lowerCamelCase_ = contextmanager(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def __snake_case ( UpperCAmelCase_ : Any ):
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class snake_case ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
        def snake_case ( self , input_dict ):
"""simple docstring"""
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def __snake_case ( UpperCAmelCase_ : Union[str, Any] ):
import torch
    def bert_cos_score_idf(model , refs , *args , **kwargs ):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs ) )
    # mock get_model which is supposed to download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model" ), patch(
        "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def __snake_case ( UpperCAmelCase_ : List[str] ):
def load_from_checkpoint(UpperCAmelCase_ : Dict ):
class snake_case :
"""simple docstring"""
            def snake_case ( self , data , *args , **kwargs ):
                """simple docstring"""
                assert len(data ) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores ) / len(scores )
return Model()
    # mock download_model and load_from_checkpoint, which would otherwise download a pretrained comet model
    with patch("comet.download_model" ) as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
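# Registering a patcher for a new metric follows the same pattern as the three
# above. A minimal sketch, assuming a hypothetical "my_metric" whose expensive
# model call lives at "my_metric_pkg.heavy_forward_pass" (both names are
# placeholders, not real packages):
#
# @LocalMetricTest.register_intensive_calls_patcher("my_metric")
# def patch_my_metric(module_name):
#     with patch("my_metric_pkg.heavy_forward_pass") as mock_forward:
#         mock_forward.return_value = np.zeros(2)
#         yield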
def __snake_case ( ):
    metric = load_metric(os.path.join("metrics" , "seqeval" ) )
    wrong_scheme = "ERROR"
    error_message = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
    with pytest.raises(ValueError , match=re.escape(error_message ) ):
        metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
| 55 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area( fnc : Callable[[int | float], int | float] , x_start : int | float , x_end : int | float , steps : int = 100 , ) -> float:
    '''simple docstring'''
    x_a = x_start
    fx_a = fnc(x_start )
    area = 0.0
    for _ in range(steps ):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x_b = (x_end - x_start) / steps + x_a
        fx_b = fnc(x_b )
        area += abs(fx_b + fx_a ) * (x_b - x_a) / 2
        # Increment step
        x_a = x_b
        fx_a = fx_b
    return area
if __name__ == "__main__":
    def f(x : float ) -> float:
        '''simple docstring'''
        return x**3 + x**2
print("f(x) = x^3 + x^2")
print("The area between the curve, x = -5, x = 5 and the x axis is:")
lowercase__ : List[str] = 1_0
while i <= 1_0_0_0_0_0:
print(F"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
i *= 1_0
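# Analytic sanity check: the odd x^3 term integrates to zero over the symmetric
# interval [-5, 5], so the exact area is the x^2 contribution, 2 * 125 / 3 =
# 250/3 ~= 83.333, which the printed approximations should approach as the
# step count grows.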
| 328 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __A ( unittest.TestCase ):
def lowercase__ ( self : Any ):
lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
lowerCAmelCase : Any = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
lowerCAmelCase : Dict = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowerCAmelCase : int = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : int , **UpperCAmelCase_ : List[Any] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : str , **UpperCAmelCase_ : Union[str, Any] ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : Optional[Any] , **UpperCAmelCase_ : Optional[int] ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : Any ):
        lowerCAmelCase : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowerCAmelCase : int = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Tuple ):
lowerCAmelCase : List[str] = self.get_tokenizer()
lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
lowerCAmelCase : int = self.get_image_processor()
lowerCAmelCase : List[Any] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase : Tuple = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : Union[str, Any] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase : Optional[Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Dict = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
lowerCAmelCase : Any = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
lowerCAmelCase : str = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : int = self.get_image_processor()
lowerCAmelCase : Dict = self.get_tokenizer()
lowerCAmelCase : Dict = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : Union[str, Any] = self.prepare_image_inputs()
lowerCAmelCase : Optional[Any] = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
lowerCAmelCase : int = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase__ ( self : Dict ):
lowerCAmelCase : Optional[int] = self.get_image_processor()
lowerCAmelCase : Optional[int] = self.get_tokenizer()
lowerCAmelCase : int = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : List[str] = 'lower newer'
lowerCAmelCase : Dict = processor(text=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Any = self.get_image_processor()
lowerCAmelCase : int = self.get_tokenizer()
lowerCAmelCase : int = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : Union[str, Any] = 'lower newer'
lowerCAmelCase : int = self.prepare_image_inputs()
lowerCAmelCase : int = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : List[Any] = self.get_image_processor()
lowerCAmelCase : Tuple = self.get_tokenizer()
lowerCAmelCase : Any = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase : int = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : Dict = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase__ ( self : Tuple ):
lowerCAmelCase : str = self.get_image_processor()
lowerCAmelCase : List[str] = self.get_tokenizer()
lowerCAmelCase : int = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase : List[str] = 'lower newer'
lowerCAmelCase : int = self.prepare_image_inputs()
lowerCAmelCase : List[Any] = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
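# For reference, a minimal end-to-end sketch of the processor outside the test
# suite (assumes the public "kakaobrain/align-base" checkpoint; `image` is a
# PIL image you supply):
#
# from transformers import AlignModel, AlignProcessor
# processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
# model = AlignModel.from_pretrained("kakaobrain/align-base")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# outputs = model(**inputs)  # outputs.logits_per_image scores image-text similarity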
| 138 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : List[str] = ["model.decoder.embed_positions.weights"]
def A_ ( snake_case : Any ) -> List[Any]:
'''simple docstring'''
if "emb" in name:
__UpperCamelCase = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
__UpperCamelCase = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
__UpperCamelCase = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
__UpperCamelCase = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
__UpperCamelCase = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
__UpperCamelCase = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
__UpperCamelCase = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
__UpperCamelCase = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
__UpperCamelCase = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
__UpperCamelCase = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
__UpperCamelCase = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def A_ ( snake_case : OrderedDict , snake_case : int ) -> Tuple[Dict, Dict]:
'''simple docstring'''
__UpperCamelCase = list(state_dict.keys() )
__UpperCamelCase = {}
for key in keys:
__UpperCamelCase = state_dict.pop(snake_case )
__UpperCamelCase = rename_keys(snake_case )
if "in_proj_weight" in key:
# split fused qkv proj
__UpperCamelCase = val[:hidden_size, :]
__UpperCamelCase = val[hidden_size : 2 * hidden_size, :]
__UpperCamelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__UpperCamelCase = val
else:
__UpperCamelCase = val
return state_dict, enc_dec_proj_state_dict
def A_ ( snake_case : str ) -> MusicgenDecoderConfig:
'''simple docstring'''
if checkpoint == "small":
# default config values
__UpperCamelCase = 1024
__UpperCamelCase = 24
__UpperCamelCase = 16
elif checkpoint == "medium":
__UpperCamelCase = 1536
__UpperCamelCase = 48
__UpperCamelCase = 24
elif checkpoint == "large":
__UpperCamelCase = 2048
__UpperCamelCase = 48
__UpperCamelCase = 32
else:
raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." )
__UpperCamelCase = MusicgenDecoderConfig(
hidden_size=snake_case , ffn_dim=hidden_size * 4 , num_hidden_layers=snake_case , num_attention_heads=snake_case , )
return config
@torch.no_grad()
def A_ ( snake_case : Any , snake_case : str=None , snake_case : Any=None , snake_case : Union[str, Any]="cpu" ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase = MusicGen.get_pretrained(snake_case , device=snake_case )
__UpperCamelCase = decoder_config_from_checkpoint(snake_case )
__UpperCamelCase = fairseq_model.lm.state_dict()
__UpperCamelCase , __UpperCamelCase = rename_state_dict(
snake_case , hidden_size=decoder_config.hidden_size )
__UpperCamelCase = TaEncoderModel.from_pretrained('''t5-base''' )
__UpperCamelCase = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
__UpperCamelCase = MusicgenForCausalLM(snake_case ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__UpperCamelCase , __UpperCamelCase = decoder.load_state_dict(snake_case , strict=snake_case )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(snake_case )
if len(snake_case ) > 0:
raise ValueError(f"Missing key(s) in state_dict: {missing_keys}" )
if len(snake_case ) > 0:
raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}" )
# init the composite model
__UpperCamelCase = MusicgenForConditionalGeneration(text_encoder=snake_case , audio_encoder=snake_case , decoder=snake_case )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(snake_case )
# check we can do a forward pass
__UpperCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__UpperCamelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__UpperCamelCase = model(input_ids=snake_case , decoder_input_ids=snake_case ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
__UpperCamelCase = AutoTokenizer.from_pretrained('''t5-base''' )
__UpperCamelCase = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
__UpperCamelCase = MusicgenProcessor(feature_extractor=snake_case , tokenizer=snake_case )
# set the appropriate bos/pad token ids
__UpperCamelCase = 2048
__UpperCamelCase = 2048
# set other default generation config params
__UpperCamelCase = int(30 * audio_encoder.config.frame_rate )
__UpperCamelCase = True
__UpperCamelCase = 3.0
if pytorch_dump_folder is not None:
Path(snake_case ).mkdir(exist_ok=snake_case )
logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}" )
model.save_pretrained(snake_case )
processor.save_pretrained(snake_case )
if repo_id:
logger.info(f"Pushing model {checkpoint} to {repo_id}" )
model.push_to_hub(snake_case )
processor.push_to_hub(snake_case )
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowercase__ : Tuple = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
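# Example invocation (a sketch; the script name and output path are placeholders):
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu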
| 328 | 0 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
a_ = logging.getLogger(__name__)
a_ = 50 # max width of layer names
a_ = 70 # max width of quantizer names
def a__ ( _UpperCamelCase : List[Any] ):
__lowerCamelCase = parser.add_argument_group('''quant_trainer arguments''' )
group.add_argument('''--wprec''' ,type=_UpperCamelCase ,default=8 ,help='''weight precision''' )
group.add_argument('''--aprec''' ,type=_UpperCamelCase ,default=8 ,help='''activation precision''' )
group.add_argument('''--quant-per-tensor''' ,action='''store_true''' ,help='''per tensor weight scaling''' )
group.add_argument('''--quant-disable''' ,action='''store_true''' ,help='''disable all quantizers''' )
group.add_argument('''--quant-disable-embeddings''' ,action='''store_true''' ,help='''disable all embeddings quantizers''' )
group.add_argument('''--quant-disable-keyword''' ,type=_UpperCamelCase ,nargs='''+''' ,help='''disable quantizers by keyword''' )
group.add_argument('''--quant-disable-layer-module''' ,type=_UpperCamelCase ,help='''disable quantizers by keyword under layer.''' )
group.add_argument('''--quant-enable-layer-module''' ,type=_UpperCamelCase ,help='''enable quantizers by keyword under layer''' )
group.add_argument('''--calibrator''' ,default='''max''' ,help='''which quantization range calibrator to use''' )
group.add_argument('''--percentile''' ,default=_UpperCamelCase ,type=_UpperCamelCase ,help='''percentile for PercentileCalibrator''' )
group.add_argument('''--fuse-qkv''' ,action='''store_true''' ,help='''use the same scale factor for qkv''' )
group.add_argument('''--clip-gelu''' ,metavar='''N''' ,type=_UpperCamelCase ,help='''clip gelu output maximum value to N''' )
group.add_argument(
'''--recalibrate-weights''' ,action='''store_true''' ,help=(
'''recalibrate weight amaxes by taking the max of the weights.'''
''' amaxes will be computed with the current quantization granularity (axis).'''
) ,)
def a__ ( _UpperCamelCase : int ):
if args.calibrator == "max":
__lowerCamelCase = '''max'''
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('''Specify --percentile when using percentile calibrator''' )
__lowerCamelCase = '''histogram'''
elif args.calibrator == "mse":
__lowerCamelCase = '''histogram'''
else:
raise ValueError(F"""Invalid calibrator {args.calibrator}""" )
__lowerCamelCase = QuantDescriptor(num_bits=args.aprec ,calib_method=_UpperCamelCase )
__lowerCamelCase = QuantDescriptor(num_bits=args.wprec ,axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(_UpperCamelCase )
quant_nn.QuantLinear.set_default_quant_desc_weight(_UpperCamelCase )
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : str ,_UpperCamelCase : List[str]=False ,_UpperCamelCase : Any=False ):
logger.info('''Configuring Model for Quantization''' )
logger.info(F"""using quantization package {pytorch_quantization.__file__}""" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(_UpperCamelCase ,['''embeddings'''] ,which='''weight''' ,_disabled=_UpperCamelCase )
if args.quant_disable:
set_quantizer_by_name(_UpperCamelCase ,[''''''] ,_disabled=_UpperCamelCase )
if args.quant_disable_keyword:
set_quantizer_by_name(_UpperCamelCase ,args.quant_disable_keyword ,_disabled=_UpperCamelCase )
if args.quant_disable_layer_module:
set_quantizer_by_name(_UpperCamelCase ,[R'''layer.\d+.''' + args.quant_disable_layer_module] ,_disabled=_UpperCamelCase )
if args.quant_enable_layer_module:
set_quantizer_by_name(_UpperCamelCase ,[R'''layer.\d+.''' + args.quant_enable_layer_module] ,_disabled=_UpperCamelCase )
if args.recalibrate_weights:
recalibrate_weights(_UpperCamelCase )
if args.fuse_qkv:
fuse_qkv(_UpperCamelCase ,_UpperCamelCase )
if args.clip_gelu:
clip_gelu(_UpperCamelCase ,args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(_UpperCamelCase )
def a__ ( _UpperCamelCase : str ):
logger.info('''Enabling Calibration''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"""{name:80}: {module}""" )
def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : int ):
logger.info('''Loading calibrated amax''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
if isinstance(module._calibrator ,calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('''percentile''' ,percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(_UpperCamelCase )
def a__ ( _UpperCamelCase : Union[str, Any] ,_UpperCamelCase : List[Any] ):
def fusea(_UpperCamelCase : Dict ,_UpperCamelCase : Optional[int] ,_UpperCamelCase : Tuple ):
for mod in [qq, qk, qv]:
if not hasattr(_UpperCamelCase ,'''_amax''' ):
print(''' WARNING: NO AMAX BUFFER''' )
return
__lowerCamelCase = qq._amax.detach().item()
__lowerCamelCase = qk._amax.detach().item()
__lowerCamelCase = qv._amax.detach().item()
__lowerCamelCase = max(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
qq._amax.fill_(_UpperCamelCase )
qk._amax.fill_(_UpperCamelCase )
qv._amax.fill_(_UpperCamelCase )
logger.info(F""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )
for name, mod in model.named_modules():
if name.endswith('''.attention.self''' ):
logger.info(F"""FUSE_QKV: {name:{name_width}}""" )
fusea(mod.matmul_q_input_quantizer ,mod.matmul_k_input_quantizer ,mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer ,mod.key._weight_quantizer ,mod.value._weight_quantizer )
def a__ ( _UpperCamelCase : Any ,_UpperCamelCase : int ):
for name, mod in model.named_modules():
if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
__lowerCamelCase = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=_UpperCamelCase )
__lowerCamelCase = mod._input_quantizer._amax.data.detach().item()
logger.info(F"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def a__ ( _UpperCamelCase : int ):
for name, mod in model.named_modules():
if hasattr(_UpperCamelCase ,'''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
__lowerCamelCase = mod.weight.shape[0]
__lowerCamelCase = mod._weight_quantizer._amax.detach()
__lowerCamelCase = torch.ones(_UpperCamelCase ,dtype=amax.dtype ,device=amax.device ) * amax
print(F"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def a__ ( _UpperCamelCase : Union[str, Any] ):
for name, mod in model.named_modules():
if hasattr(_UpperCamelCase ,'''_weight_quantizer''' ):
            if not hasattr(mod._weight_quantizer ,'''_amax''' ):
                print(F"""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
__lowerCamelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
__lowerCamelCase = set(range(len(mod.weight.size() ) ) ) - axis_set
__lowerCamelCase = pytorch_quantization.utils.reduce_amax(mod.weight ,axis=_UpperCamelCase ,keepdims=_UpperCamelCase ).detach()
logger.info(F"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
__lowerCamelCase = amax
def a__ ( _UpperCamelCase : str ,_UpperCamelCase : int=25 ,_UpperCamelCase : Optional[int]=1_80 ,_UpperCamelCase : int=None ):
if ignore is None:
__lowerCamelCase = []
elif not isinstance(_UpperCamelCase ,_UpperCamelCase ):
__lowerCamelCase = [ignore]
__lowerCamelCase = 0
for name, mod in model.named_modules():
if not hasattr(_UpperCamelCase ,'''weight''' ):
continue
__lowerCamelCase = max(_UpperCamelCase ,len(_UpperCamelCase ) )
for name, mod in model.named_modules():
__lowerCamelCase = getattr(_UpperCamelCase ,'''_input_quantizer''' ,_UpperCamelCase )
__lowerCamelCase = getattr(_UpperCamelCase ,'''_weight_quantizer''' ,_UpperCamelCase )
if not hasattr(_UpperCamelCase ,'''weight''' ):
continue
if type(_UpperCamelCase ) in ignore:
continue
if [True for s in ignore if type(_UpperCamelCase ) is str and s in name]:
continue
__lowerCamelCase = F"""Act:{input_q.extra_repr()}"""
__lowerCamelCase = F"""Wgt:{weight_q.extra_repr()}"""
__lowerCamelCase = F"""{name:{name_width}} {act_str} {wgt_str}"""
if len(_UpperCamelCase ) <= line_width:
logger.info(_UpperCamelCase )
else:
logger.info(F"""{name:{name_width}} {act_str}""" )
logger.info(F"""{" ":{name_width}} {wgt_str}""" )
def a__ ( _UpperCamelCase : Dict ):
__lowerCamelCase = 0
for name, mod in model.named_modules():
if isinstance(_UpperCamelCase ,pytorch_quantization.nn.TensorQuantizer ):
print(F"""{name:80} {mod}""" )
count += 1
print(F"""{count} TensorQuantizers found in model""" )
def a__ ( _UpperCamelCase : Optional[Any] ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : Tuple ,_UpperCamelCase : Optional[Any] ,_UpperCamelCase : str ):
__lowerCamelCase = getattr(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
if quantizer_mod is not None:
assert hasattr(_UpperCamelCase ,_UpperCamelCase )
setattr(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
else:
logger.warning(F"""{name} has no {quantizer}""" )
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : List[Any] ,_UpperCamelCase : Optional[int]="both" ,**_UpperCamelCase : int ):
__lowerCamelCase = F"""Warning: changing {which} quantizers of {name:{qname_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
if which in ["input", "both"]:
set_quantizer(_UpperCamelCase ,_UpperCamelCase ,'''_input_quantizer''' ,_UpperCamelCase ,_UpperCamelCase )
if which in ["weight", "both"]:
set_quantizer(_UpperCamelCase ,_UpperCamelCase ,'''_weight_quantizer''' ,_UpperCamelCase ,_UpperCamelCase )
logger.info(_UpperCamelCase )
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : Union[str, Any] ,**_UpperCamelCase : Tuple ):
for name, mod in model.named_modules():
if hasattr(_UpperCamelCase ,'''_input_quantizer''' ) or hasattr(_UpperCamelCase ,'''_weight_quantizer''' ):
for n in names:
if re.search(_UpperCamelCase ,_UpperCamelCase ):
set_quantizers(_UpperCamelCase ,_UpperCamelCase ,**_UpperCamelCase )
elif name.endswith('''_quantizer''' ):
for n in names:
if re.search(_UpperCamelCase ,_UpperCamelCase ):
__lowerCamelCase = F"""Warning: changing {name:{name_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
setattr(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
logger.info(_UpperCamelCase )
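# Minimal sketch of the primitive these helpers configure (assumes
# pytorch_quantization is installed; 8-bit max calibration, per-tensor inputs):
#
# desc = QuantDescriptor(num_bits=8, calib_method="max")
# quant_nn.QuantLinear.set_default_quant_desc_input(desc)
# layer = quant_nn.QuantLinear(16, 32)  # drop-in replacement for torch.nn.Linear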
| 330 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase__ : List[str] = 1_6
lowercase__ : str = 3_2
def A_ ( snake_case : Accelerator , snake_case : int = 16 ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
__UpperCamelCase = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__UpperCamelCase = datasets.map(
snake_case , batched=snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCamelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(snake_case : str ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__UpperCamelCase = 16
elif accelerator.mixed_precision != "no":
__UpperCamelCase = 8
else:
__UpperCamelCase = None
return tokenizer.pad(
snake_case , padding='''longest''' , max_length=snake_case , pad_to_multiple_of=snake_case , return_tensors='''pt''' , )
# Instantiate dataloaders.
__UpperCamelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case )
__UpperCamelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase__ : Union[str, Any] = mocked_dataloaders # noqa: F811
def A_ ( snake_case : List[str] , snake_case : List[Any] ) -> Tuple:
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , snake_case ) == "1":
__UpperCamelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
__UpperCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
__UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCamelCase = config['''lr''']
__UpperCamelCase = int(config['''num_epochs'''] )
__UpperCamelCase = int(config['''seed'''] )
__UpperCamelCase = int(config['''batch_size'''] )
set_seed(snake_case )
__UpperCamelCase , __UpperCamelCase = get_dataloaders(snake_case , snake_case )
__UpperCamelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
__UpperCamelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__UpperCamelCase = batch_size // MAX_GPU_BATCH_SIZE
__UpperCamelCase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCamelCase = model.to(accelerator.device )
# Instantiate optimizer
__UpperCamelCase = AdamW(params=model.parameters() , lr=snake_case )
# Instantiate scheduler
__UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=snake_case , num_warmup_steps=100 , num_training_steps=(len(snake_case ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
snake_case , snake_case , snake_case , snake_case , snake_case )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
__UpperCamelCase = os.path.split(snake_case )[-1].split('''.''' )[0]
accelerator.init_trackers(snake_case , snake_case )
# Now we train the model
for epoch in range(snake_case ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
__UpperCamelCase = 0
for step, batch in enumerate(snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__UpperCamelCase = model(**snake_case )
__UpperCamelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
__UpperCamelCase = loss / gradient_accumulation_steps
accelerator.backward(snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
__UpperCamelCase = model(**snake_case )
__UpperCamelCase = outputs.logits.argmax(dim=-1 )
__UpperCamelCase , __UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=snake_case , references=snake_case , )
__UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , snake_case )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(snake_case ),
'''epoch''': epoch,
} , step=snake_case , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def A_ ( ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=snake_case , default=snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=snake_case , default='''logs''' , help='''Location on where to store experiment tracking logs and relevant project information''' , )
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(snake_case , snake_case )
if __name__ == "__main__":
main()
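# To launch this script (a sketch; assumes `accelerate config` has been run once
# to describe the hardware):
#   accelerate launch this_script.py --with_tracking --mixed_precision fp16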
| 328 | 0 |
"""simple docstring"""
from functools import reduce
snake_case_ = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def _lowerCAmelCase ( lowercase_ = N ):
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , lowercase_[i : i + 13] ) )
        for i in range(len(lowercase_ ) - 12 ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
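# An arguably clearer equivalent of the reduce-based solution (an alternative
# sketch; assumes Python 3.8+ for math.prod):
#
# from math import prod
# max(prod(int(d) for d in N[i : i + 13]) for i in range(len(N) - 12))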
| 78 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : Union[str, Any] = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
lowercase__ : str = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
lowercase__ : str = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = 'whisper'
_snake_case = ['past_key_values']
_snake_case = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , SCREAMING_SNAKE_CASE_=51865 , SCREAMING_SNAKE_CASE_=80 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=1536 , SCREAMING_SNAKE_CASE_=1536 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=50257 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=256 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1500 , SCREAMING_SNAKE_CASE_=448 , SCREAMING_SNAKE_CASE_=50256 , SCREAMING_SNAKE_CASE_=50256 , SCREAMING_SNAKE_CASE_=50256 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=[220, 50256] , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=256 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=0.0_5 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=7 , **SCREAMING_SNAKE_CASE_ , )-> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = vocab_size
__UpperCamelCase = num_mel_bins
__UpperCamelCase = d_model
__UpperCamelCase = encoder_layers
__UpperCamelCase = encoder_attention_heads
__UpperCamelCase = decoder_layers
__UpperCamelCase = decoder_attention_heads
__UpperCamelCase = decoder_ffn_dim
__UpperCamelCase = encoder_ffn_dim
__UpperCamelCase = dropout
__UpperCamelCase = attention_dropout
__UpperCamelCase = activation_dropout
__UpperCamelCase = activation_function
__UpperCamelCase = init_std
__UpperCamelCase = encoder_layerdrop
__UpperCamelCase = decoder_layerdrop
__UpperCamelCase = use_cache
__UpperCamelCase = encoder_layers
__UpperCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__UpperCamelCase = max_source_positions
__UpperCamelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__UpperCamelCase = classifier_proj_size
__UpperCamelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCamelCase = apply_spec_augment
__UpperCamelCase = mask_time_prob
__UpperCamelCase = mask_time_length
__UpperCamelCase = mask_time_min_masks
__UpperCamelCase = mask_feature_prob
__UpperCamelCase = mask_feature_length
__UpperCamelCase = mask_feature_min_masks
__UpperCamelCase = median_filter_width
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , suppress_tokens=SCREAMING_SNAKE_CASE_ , begin_suppress_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
@property
def A__ ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__UpperCamelCase = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
] )
if self.use_past:
__UpperCamelCase = {0: '''batch'''}
else:
__UpperCamelCase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction='''inputs''' )
return common_inputs
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 22050 , SCREAMING_SNAKE_CASE_ = 5.0 , SCREAMING_SNAKE_CASE_ = 220 , )-> Mapping[str, Any]:
'''simple docstring'''
__UpperCamelCase = OrderedDict()
__UpperCamelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , time_duration=SCREAMING_SNAKE_CASE_ , frequency=SCREAMING_SNAKE_CASE_ , )
__UpperCamelCase = encoder_inputs['''input_features'''].shape[2]
__UpperCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length
__UpperCamelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = encoder_inputs.pop('''input_features''' )
__UpperCamelCase = decoder_inputs.pop('''decoder_input_ids''' )
if "past_key_values" in decoder_inputs:
__UpperCamelCase = decoder_inputs.pop('''past_key_values''' )
return dummy_inputs
@property
def A__ ( self )-> float:
'''simple docstring'''
return 1E-3
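# Usage sketch (assumes the canonical transformers name WhisperConfig; the sizes
# below are illustrative, not a released checkpoint):
#
# from transformers import WhisperConfig
# config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
# assert config.hidden_size == 384  # resolved through the attribute_map alias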
| 328 | 0 |
import re
from filelock import FileLock
try:
import nltk
a_ :List[str] = True
except (ImportError, ModuleNotFoundError):
a_ :Union[str, Any] = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def lowercase_ (A : str ):
    A = re.sub('<n>' , '' , A ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(A ) )
| 277 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Any = logging.get_logger(__name__)
lowercase__ : Tuple = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = 'xlnet'
_snake_case = ['mems']
_snake_case = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , SCREAMING_SNAKE_CASE_=32000 , SCREAMING_SNAKE_CASE_=1024 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=4096 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="bi" , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=-1 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="last" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="tanh" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=2 , **SCREAMING_SNAKE_CASE_ , )-> List[str]:
'''simple docstring'''
__UpperCamelCase = vocab_size
__UpperCamelCase = d_model
__UpperCamelCase = n_layer
__UpperCamelCase = n_head
if d_model % n_head != 0:
raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" )
__UpperCamelCase = d_model // n_head
__UpperCamelCase = ff_activation
__UpperCamelCase = d_inner
__UpperCamelCase = untie_r
__UpperCamelCase = attn_type
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = dropout
__UpperCamelCase = mem_len
__UpperCamelCase = reuse_len
__UpperCamelCase = bi_data
__UpperCamelCase = clamp_len
__UpperCamelCase = same_length
__UpperCamelCase = summary_type
__UpperCamelCase = summary_use_proj
__UpperCamelCase = summary_activation
__UpperCamelCase = summary_last_dropout
__UpperCamelCase = start_n_top
__UpperCamelCase = end_n_top
__UpperCamelCase = bos_token_id
__UpperCamelCase = pad_token_id
__UpperCamelCase = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
'''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'''
''' instead.''' , SCREAMING_SNAKE_CASE_ , )
__UpperCamelCase = kwargs['''use_cache''']
__UpperCamelCase = use_mems_eval
__UpperCamelCase = use_mems_train
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def A__ ( self )-> Optional[Any]:
'''simple docstring'''
logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." )
return -1
@max_position_embeddings.setter
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> List[str]:
'''simple docstring'''
raise NotImplementedError(
F"The model {self.model_type} is one of the few models that has no sequence length limit." )
| 328 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowercase : Any = logging.get_logger(__name__)
class A__ :
"""simple docstring"""
__A : List[Any] = 4_2
__A : Tuple = None
@staticmethod
def __lowercase ( ) -> Dict:
'''simple docstring'''
raise NotImplementedError
def __lowercase ( self , lowercase , lowercase , lowercase , **lowercase) -> List[Any]:
'''simple docstring'''
raise NotImplementedError
def __lowercase ( self , lowercase) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError
def __lowercase ( self) -> List[str]:
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
F'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.')
@classmethod
def __lowercase ( cls) -> Optional[int]:
'''simple docstring'''
return F'`pip install {cls.pip_package or cls.name}`'
class A__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__A : str = '''optuna'''
@staticmethod
def __lowercase ( ) -> List[Any]:
'''simple docstring'''
return is_optuna_available()
def __lowercase ( self , lowercase , lowercase , lowercase , **lowercase) -> str:
'''simple docstring'''
return run_hp_search_optuna(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def __lowercase ( self , lowercase) -> int:
'''simple docstring'''
return default_hp_space_optuna(SCREAMING_SNAKE_CASE_)
class A__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__A : Optional[int] = '''ray'''
__A : Tuple = '''\'ray[tune]\''''
@staticmethod
def __lowercase ( ) -> List[Any]:
'''simple docstring'''
return is_ray_available()
def __lowercase ( self , lowercase , lowercase , lowercase , **lowercase) -> List[Any]:
'''simple docstring'''
return run_hp_search_ray(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def __lowercase ( self , lowercase) -> Tuple:
'''simple docstring'''
return default_hp_space_ray(SCREAMING_SNAKE_CASE_)
class A__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__A : Optional[int] = '''sigopt'''
@staticmethod
def __lowercase ( ) -> List[Any]:
'''simple docstring'''
return is_sigopt_available()
def __lowercase ( self , lowercase , lowercase , lowercase , **lowercase) -> Optional[Any]:
'''simple docstring'''
return run_hp_search_sigopt(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def __lowercase ( self , lowercase) -> int:
'''simple docstring'''
return default_hp_space_sigopt(SCREAMING_SNAKE_CASE_)
class A__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__A : Optional[Any] = '''wandb'''
@staticmethod
def __lowercase ( ) -> List[Any]:
'''simple docstring'''
return is_wandb_available()
def __lowercase ( self , lowercase , lowercase , lowercase , **lowercase) -> int:
'''simple docstring'''
return run_hp_search_wandb(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
def __lowercase ( self , lowercase) -> int:
'''simple docstring'''
return default_hp_space_wandb(SCREAMING_SNAKE_CASE_)
lowercase : Any = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def A_ ( ) -> str:
a__ : List[Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(A__ ) > 0:
a__ : List[str] = available_backends[0].name
if len(A__ ) > 1:
logger.info(
F'{len(A__ )} hyperparameter search backends available. Using {name} as the default.' )
return name
raise RuntimeError(
'No hyperparameter search backend available.\n'
+ '\n'.join(
F' - To install {backend.name} run {backend.pip_install()}'
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
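# Typical entry point for these backends (a sketch; assumes a transformers
# Trainer built with a model_init callable so trials can re-instantiate the model):
#
# best_run = trainer.hyperparameter_search(direction="maximize", backend="optuna", n_trials=10)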
| 99 |
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
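
# The reverse() method above is the classic three-pointer walk. The same
# idea on a flat array of (value, next_index) pairs, as a standalone
# illustration (the representation and names are this sketch's own):
def reverse_links(nodes):
    """nodes[i] == (value, next_index); returns the new head index."""
    prev, current = None, 0 if nodes else None
    while current is not None:
        value, next_node = nodes[current]
        nodes[current] = (value, prev)  # point the link backwards
        prev, current = current, next_node
    return prev


assert reverse_links([(1, 1), (2, 2), (3, None)]) == 2  # head is now the old tail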
| 328 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features: Features) -> "AutomaticSpeechRecognition":
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # The dataclass is frozen, so bypass __setattr__ when swapping the schema.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
return {self.audio_column: "audio", self.transcription_column: "transcription"} | 312 |
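
# Illustrative use of the task template above, assuming the canonical
# `datasets` spelling of the API (the column names "file"/"text" are
# placeholders, not requirements):
#
#     task = AutomaticSpeechRecognition(audio_column="file", transcription_column="text")
#     task = task.align_with_features(Features({"file": Audio(), "text": Value("string")}))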
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square via math.sqrt (note: the float
    comparison can misfire for very large inputs)."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
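
# For exact integer arithmetic, math.isqrt (Python 3.8+) sidesteps the
# float round-off that can bite the sqrt-based check above on large inputs:
def perfect_square_isqrt(num: int) -> bool:
    return num >= 0 and math.isqrt(num) ** 2 == num


assert perfect_square_isqrt(10**30) and not perfect_square_isqrt(10**30 + 1)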
| 328 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
UpperCamelCase__ = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def lowerCAmelCase_ ( __A ) -> str:
'''simple docstring'''
UpperCAmelCase__ = {}
with open(__A, "r" ) as file:
for line_number, line in enumerate(__A ):
UpperCAmelCase__ = line.strip()
if line:
UpperCAmelCase__ = line.split()
UpperCAmelCase__ = line_number
UpperCAmelCase__ = words[0]
UpperCAmelCase__ = value
return result
def lowerCAmelCase_ ( __A, __A, __A, __A, __A ) -> Union[str, Any]:
'''simple docstring'''
for attribute in key.split("." ):
UpperCAmelCase__ = getattr(__A, __A )
UpperCAmelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__A ):
UpperCAmelCase__ = PARAM_MAPPING[full_name.split("." )[-1]]
UpperCAmelCase__ = "param"
if weight_type is not None and weight_type != "param":
UpperCAmelCase__ = getattr(__A, __A ).shape
elif weight_type is not None and weight_type == "param":
UpperCAmelCase__ = hf_pointer
for attribute in hf_param_name.split("." ):
UpperCAmelCase__ = getattr(__A, __A )
UpperCAmelCase__ = shape_pointer.shape
# let's reduce dimension
UpperCAmelCase__ = value[0]
else:
UpperCAmelCase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase__ = value
elif weight_type == "weight_g":
UpperCAmelCase__ = value
elif weight_type == "weight_v":
UpperCAmelCase__ = value
elif weight_type == "bias":
UpperCAmelCase__ = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
UpperCAmelCase__ = getattr(__A, __A )
UpperCAmelCase__ = value
else:
UpperCAmelCase__ = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowerCAmelCase_ ( __A, __A, __A, __A, __A ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__A ):
UpperCAmelCase__ = PARAM_MAPPING[full_name.split("." )[-1]]
UpperCAmelCase__ = "param"
if weight_type is not None and weight_type != "param":
UpperCAmelCase__ = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCAmelCase__ = ".".join([key, hf_param_name] )
else:
UpperCAmelCase__ = key
UpperCAmelCase__ = value if "lm_head" in full_key else value[0]
UpperCamelCase__ = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def lowerCAmelCase_ ( __A, __A, __A=None, __A=None ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase__ = False
for key, mapped_key in MAPPING.items():
UpperCAmelCase__ = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCAmelCase__ = True
if "*" in mapped_key:
UpperCAmelCase__ = name.split(__A )[0].split("." )[-2]
UpperCAmelCase__ = mapped_key.replace("*", __A )
if "weight_g" in name:
UpperCAmelCase__ = "weight_g"
elif "weight_v" in name:
UpperCAmelCase__ = "weight_v"
elif "bias" in name:
UpperCAmelCase__ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase__ = "weight"
else:
UpperCAmelCase__ = None
if hf_dict is not None:
rename_dict(__A, __A, __A, __A, __A )
else:
set_recursively(__A, __A, __A, __A, __A )
return is_used
return is_used
def lowerCAmelCase_ ( __A, __A, __A ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ = []
UpperCAmelCase__ = fairseq_model.state_dict()
UpperCAmelCase__ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase__ = False
if "conv_layers" in name:
load_conv_layer(
__A, __A, __A, __A, hf_model.config.feat_extract_norm == "group", )
UpperCAmelCase__ = True
else:
UpperCAmelCase__ = load_wavaveca_layer(__A, __A, __A )
if not is_used:
unused_weights.append(__A )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowerCAmelCase_ ( __A, __A, __A, __A, __A ) -> str:
'''simple docstring'''
UpperCAmelCase__ = full_name.split("conv_layers." )[-1]
UpperCAmelCase__ = name.split("." )
UpperCAmelCase__ = int(items[0] )
UpperCAmelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase__ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__A )
@torch.no_grad()
def lowerCAmelCase_ ( __A, __A, __A=None, __A=None, __A=True, __A=False ) -> Tuple:
'''simple docstring'''
if config_path is not None:
UpperCAmelCase__ = WavaVecaConfig.from_pretrained(__A )
else:
UpperCAmelCase__ = WavaVecaConfig()
if is_seq_class:
UpperCAmelCase__ = read_txt_into_dict(__A )
UpperCAmelCase__ = idalabel
UpperCAmelCase__ = WavaVecaForSequenceClassification(__A )
UpperCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=__A, return_attention_mask=__A, )
feature_extractor.save_pretrained(__A )
elif is_finetuned:
if dict_path:
UpperCAmelCase__ = Dictionary.load(__A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase__ = target_dict.pad_index
UpperCAmelCase__ = target_dict.bos_index
UpperCAmelCase__ = target_dict.eos_index
UpperCAmelCase__ = len(target_dict.symbols )
UpperCAmelCase__ = os.path.join(__A, "vocab.json" )
if not os.path.isdir(__A ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__A ) )
return
os.makedirs(__A, exist_ok=__A )
UpperCAmelCase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
with open(__A, "w", encoding="utf-8" ) as vocab_handle:
json.dump(__A, __A )
UpperCAmelCase__ = WavaVecaCTCTokenizer(
__A, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=__A, )
UpperCAmelCase__ = True if config.feat_extract_norm == "layer" else False
UpperCAmelCase__ = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=__A, return_attention_mask=__A, )
UpperCAmelCase__ = WavaVecaProcessor(feature_extractor=__A, tokenizer=__A )
processor.save_pretrained(__A )
UpperCAmelCase__ = WavaVecaForCTC(__A )
else:
UpperCAmelCase__ = WavaVecaForPreTraining(__A )
if is_finetuned or is_seq_class:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
UpperCAmelCase__ = argparse.Namespace(task="audio_pretraining" )
UpperCAmelCase__ = fairseq.tasks.setup_task(__A )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=__A )
UpperCAmelCase__ = model[0].eval()
recursively_load_weights(__A, __A, not is_finetuned )
hf_wavavec.save_pretrained(__A )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
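
# A compact, standalone illustration of the "*"-wildcard renaming driven by
# the key-mapping table above: a layer index pulled out of the fairseq key
# replaces the "*" in the Hugging Face key. The extraction below is a
# simplified sketch, not the exact slicing the converter uses.
def expand_wildcard(fairseq_key, mapped_key):
    layer_index = fairseq_key.split(".layers.")[-1].split(".")[0]
    return mapped_key.replace("*", layer_index)


assert (
    expand_wildcard("encoder.layers.3.fc1", "encoder.layers.*.feed_forward.intermediate_dense")
    == "encoder.layers.3.feed_forward.intermediate_dense"
)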
| 65 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a sorted (descending) array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives by binary-searching each (shrinking) row slice."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every value."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, stopping at the first negative per row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting strategies against the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
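
# A common alternative not used above: the O(m + n) "staircase" walk that
# starts at the bottom-left corner of the row/column-descending grid.
def count_negatives_staircase(grid: list[list[int]]) -> int:
    total = 0
    row, col = len(grid) - 1, 0
    while row >= 0 and col < len(grid[0]):
        if grid[row][col] < 0:
            # Everything to the right in this row is also negative.
            total += len(grid[0]) - col
            row -= 1
        else:
            col += 1
    return total


assert count_negatives_staircase([[7, 7, 6], [-1, -2, -3]]) == 3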
| 328 | 0 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the "Cited by" anchor text for a Google Scholar lookup."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
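
# A slightly hardened variant of the request above (a sketch reusing the
# same selectors; the timeout value is an arbitrary choice):
def get_citation_safe(base_url: str, params: dict) -> str:
    response = requests.get(base_url, params=params, timeout=10)
    response.raise_for_status()  # surface 4xx/5xx instead of parsing an error page
    soup = BeautifulSoup(response.content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()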
| 178 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase__ : str = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
_snake_case = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> None:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = size if size is not None else {'''shortest_edge''': 224}
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = crop_pct
__UpperCamelCase = resample
__UpperCamelCase = do_center_crop
__UpperCamelCase = crop_size
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
if crop_pct is not None:
if "shortest_edge" in size:
__UpperCamelCase = int(size['''shortest_edge'''] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
__UpperCamelCase = int(size['''height'''] / crop_pct )
else:
__UpperCamelCase = (int(size['''height'''] / crop_pct ), int(size['''width'''] / crop_pct ))
else:
raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) )
__UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
else:
if "shortest_edge" in size:
__UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ )
elif "height" in size and "width" in size:
__UpperCamelCase = (size['''height'''], size['''width'''])
else:
raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"size must contain 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> str:
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray:
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , )-> PIL.Image.Image:
'''simple docstring'''
__UpperCamelCase = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct
__UpperCamelCase = resample if resample is not None else self.resample
__UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase = image_mean if image_mean is not None else self.image_mean
__UpperCamelCase = image_std if image_std is not None else self.image_std
__UpperCamelCase = size if size is not None else self.size
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = crop_size if crop_size is not None else self.crop_size
__UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' )
__UpperCamelCase = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_pct is None:
raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
__UpperCamelCase = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , crop_pct=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
__UpperCamelCase = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
__UpperCamelCase = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
__UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
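
# The crop_pct recipe above in isolation: resize the short side to
# size / crop_pct, then center-crop back to size (224 and 0.9 mirror the
# class defaults; the helper name is this sketch's own):
def short_side_before_center_crop(size: int, crop_pct: float) -> int:
    return int(size / crop_pct)


assert short_side_before_center_crop(224, 0.9) == 248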
| 328 | 0 |
from copy import deepcopy


class FenwickTree:
    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
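
# A quick usage check of the tree above (this variant is 0-indexed, with
# tree[0] holding the first element):
fenwick = FenwickTree(arr=[1, 2, 3, 4, 5])
assert fenwick.prefix(3) == 6  # 1 + 2 + 3
assert fenwick.query(1, 4) == 9  # 2 + 3 + 4
fenwick.add(2, 10)  # the array becomes [1, 2, 13, 4, 5]
assert fenwick.get(2) == 13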
| 89 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowercase__ : Any = getLogger(__name__)
lowercase__ : List[str] = "cuda" if torch.cuda.is_available() else "cpu"
def A_ ( snake_case : List[str] , snake_case : str , snake_case : str , snake_case : int = 8 , snake_case : str = DEFAULT_DEVICE , snake_case : List[str]=False , snake_case : Union[str, Any]="summarization" , snake_case : str=None , **snake_case : List[Any] , ) -> Dict:
'''simple docstring'''
__UpperCamelCase = Path(snake_case ).open('''w''' , encoding='''utf-8''' )
__UpperCamelCase = str(snake_case )
__UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(snake_case ).to(snake_case )
if fpaa:
__UpperCamelCase = model.half()
__UpperCamelCase = AutoTokenizer.from_pretrained(snake_case )
logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
__UpperCamelCase = time.time()
# update config with task specific params
use_task_specific_params(snake_case , snake_case )
if prefix is None:
__UpperCamelCase = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(snake_case , snake_case ) ) ):
__UpperCamelCase = [prefix + text for text in examples_chunk]
__UpperCamelCase = tokenizer(snake_case , return_tensors='''pt''' , truncation=snake_case , padding='''longest''' ).to(snake_case )
__UpperCamelCase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **snake_case , )
__UpperCamelCase = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__UpperCamelCase = int(time.time() - start_time ) # seconds
__UpperCamelCase = len(snake_case )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def A_ ( ) -> Tuple:
'''simple docstring'''
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def A_ ( snake_case : str=True ) -> int:
'''simple docstring'''
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=snake_case , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=snake_case , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=snake_case , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=snake_case , required=snake_case , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=snake_case , required=snake_case , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=snake_case , required=snake_case , default=snake_case , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' , type=snake_case , required=snake_case , default=snake_case , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--task''' , type=snake_case , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=snake_case , default=8 , required=snake_case , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=snake_case , default=-1 , required=snake_case , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=snake_case , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__UpperCamelCase , __UpperCamelCase = parser.parse_known_args()
__UpperCamelCase = parse_numeric_n_bool_cl_kwargs(snake_case )
if parsed_args and verbose:
print(f"parsed the following generate kwargs: {parsed_args}" )
__UpperCamelCase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__UpperCamelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=snake_case )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c." )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
__UpperCamelCase = generate_summaries_or_translations(
snake_case , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **snake_case , )
if args.reference_path is None:
return {}
# Compute scores
__UpperCamelCase = calculate_bleu if '''translation''' in args.task else calculate_rouge
__UpperCamelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
__UpperCamelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(snake_case )]
__UpperCamelCase = score_fn(snake_case , snake_case )
scores.update(snake_case )
if args.dump_args:
scores.update(snake_case )
if args.info:
__UpperCamelCase = args.info
if verbose:
print(snake_case )
if args.score_path is not None:
json.dump(snake_case , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
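    # Usage for summarization, mirroring the MT example above (model name and
    # paths are illustrative placeholders):
    # python run_eval.py facebook/bart-large-cnn cnn_dm/test.source dbart/test_generations.txt \
    #     --reference_path cnn_dm/test.target --score_path dbart/rouge.json --task summarization --bs 32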
| 328 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowerCamelCase_ = random.Random()
if is_torch_available():
import torch
def __magic_name__ ( __a : List[Any] , __a : str=1.0 , __a : Tuple=None , __a : Any=None ):
'''simple docstring'''
if rng is None:
UpperCamelCase__ = global_rng
UpperCamelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __A( unittest.TestCase ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=20_00 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=1_60_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def UpperCAmelCase_ (self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ):
def _flatten(SCREAMING_SNAKE_CASE_ ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
UpperCamelCase__ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __A( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ASTFeatureExtractor
def UpperCAmelCase_ (self ):
UpperCamelCase__ = ASTFeatureExtractionTester(self )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
UpperCamelCase__ = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase__ = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
UpperCamelCase__ = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# Test batched
UpperCamelCase__ = feat_extract(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors="""np""" ).input_values
UpperCamelCase__ = feat_extract(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
UpperCamelCase__ = np.asarray(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = feat_extract(SCREAMING_SNAKE_CASE_ , return_tensors="""np""" ).input_values
UpperCamelCase__ = feat_extract(SCREAMING_SNAKE_CASE_ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) )
@require_torch
def UpperCAmelCase_ (self ):
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = np.random.rand(1_00 ).astype(np.floataa )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
UpperCamelCase__ = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
from datasets import load_dataset
UpperCamelCase__ = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("""id""" ).select(range(SCREAMING_SNAKE_CASE_ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
@require_torch
def UpperCAmelCase_ (self ):
        # fmt: off
        UpperCamelCase__ = torch.tensor(
[-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
-1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
-1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
-0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = ASTFeatureExtractor()
UpperCamelCase__ = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).input_values
        self.assertEqual(input_values.shape , (1, 10_24, 1_28) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
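
# A reproducible stand-in for the random ragged inputs used above: seeding
# the generator keeps the synthetic "audio" identical across test runs
# (an illustrative helper, not part of the tested API).
def make_fake_audio_batch(batch_size=2, length=800):
    rng = np.random.default_rng(0)
    return [rng.random(length).astype(np.float32) for _ in range(batch_size)]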
| 244 |
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits in factorial(num)."""
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
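
# The same digit sum without the string round-trip, via divmod:
def digit_sum(number: int) -> int:
    total = 0
    while number:
        number, digit = divmod(number, 10)
        total += digit
    return total


assert digit_sum(factorial(10)) == 27  # 3628800 -> 3 + 6 + 2 + 8 + 8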
| 328 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
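
# The deferral trick in miniature: module-level __getattr__ (PEP 562) lets a
# package resolve heavy submodules only on first attribute access, which is
# what _LazyModule automates above. A standalone sketch for a hypothetical
# package __init__.py:
#
#     import importlib
#
#     def __getattr__(name):
#         if name == "MraModel":
#             return importlib.import_module(".modeling_mra", __name__).MraModel
#         raise AttributeError(name)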
| 55 |
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place with binary insertion sort and return it."""
    n = len(collection)
    for i in range(1, n):
        value_to_insert = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if value_to_insert < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = value_to_insert
    return collection
if __name__ == "__main__":
lowercase__ : List[Any] = input("Enter numbers separated by a comma:\n").strip()
lowercase__ : str = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
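
# The standard library expresses the same idea: bisect finds the insertion
# point with an identical binary search, and insort does search + insert.
import bisect


def binary_insertion_sort_bisect(collection: list) -> list:
    result: list = []
    for item in collection:
        bisect.insort(result, item)  # O(log n) search, O(n) shift on insert
    return result


assert binary_insertion_sort_bisect([5, 2, 4, 1]) == [1, 2, 4, 5]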
| 328 | 0 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    """Scrape the worldwide case, death and recovery totals."""
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
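
# The scraped values are display strings such as "704,512,361"; a small
# illustrative helper to coerce them to integers:
def to_int(count: str) -> int:
    return int(count.strip().replace(",", ""))


assert to_int(" 704,512,361 ") == 704512361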
| 138 |
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []})

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"])
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return a dict mapping each keyword to the start offsets of its occurrences."""
        result: dict = {}
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
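
# A quick usage check of the automaton above: each keyword maps to the
# start offsets of its occurrences in the searched text.
auto = Automaton(["he", "she", "his"])
assert auto.search_in("ushers") == {"she": [1], "he": [2]}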
| 328 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"""Save vocab file to {pytorch_vocab_dump_path}""")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a_ = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
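    # Illustrative invocation (the script filename and paths are placeholders;
    # the flags match the argparse definitions above):
    # python convert_xlm_checkpoint.py \
    #     --xlm_checkpoint_path /path/to/mlm_en_2048.pth \
    #     --pytorch_dump_folder_path /path/to/output_dir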
| 330 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , )-> Dict:
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = num_labels
__UpperCamelCase = num_choices
__UpperCamelCase = scope
def A__ ( self )-> List[str]:
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self )-> str:
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Any:
'''simple docstring'''
__UpperCamelCase = DistilBertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = DistilBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Tuple:
'''simple docstring'''
__UpperCamelCase = DistilBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = DistilBertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str:
'''simple docstring'''
__UpperCamelCase = self.num_labels
__UpperCamelCase = DistilBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str:
'''simple docstring'''
__UpperCamelCase = self.num_choices
__UpperCamelCase = DistilBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs
__UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
_snake_case = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
        else ()
)
_snake_case = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case = True
_snake_case = True
_snake_case = True
_snake_case = True
def A__ ( self )-> Dict:
'''simple docstring'''
__UpperCamelCase = DistilBertModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , dim=37 )
def A__ ( self )-> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> int:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Any:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
@slow
def A__ ( self )-> List[str]:
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = DistilBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@slow
@require_torch_gpu
def A__ ( self )-> List[str]:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__UpperCamelCase = True
__UpperCamelCase = model_class(config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = torch.jit.trace(
SCREAMING_SNAKE_CASE_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) )
__UpperCamelCase = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) , map_location=SCREAMING_SNAKE_CASE_ )
loaded(inputs_dict['''input_ids'''].to(SCREAMING_SNAKE_CASE_ ) , inputs_dict['''attention_mask'''].to(SCREAMING_SNAKE_CASE_ ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
__UpperCamelCase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = torch.tensor(
[[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 328 | 0 |
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    "Format `t` (in seconds) to (h):mm:ss"
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=300):
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    "Put the texts in `items` in an HTML table."
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
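# For example (added), text_to_html_table([["Step", "Loss"], [10, 0.25]]) renders a
# table with one header row ("Step", "Loss") and one body row in which the float
# 0.25 is formatted to six decimals ("0.250000") while 10 is rendered as-is.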
class A_ :
"""simple docstring"""
__UpperCamelCase = 5
__UpperCamelCase = 0.2
def __init__( self :Optional[int] , lowercase_ :List[Any] , lowercase_ :Any = None , lowercase_ :Optional[int] = True , lowercase_ :Dict = None , lowercase_ :List[str] = 3_00 , ) -> Any:
UpperCAmelCase = total
UpperCAmelCase = '' if prefix is None else prefix
UpperCAmelCase = leave
UpperCAmelCase = parent
UpperCAmelCase = width
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
def UpperCAmelCase__ ( self :List[str] , lowercase_ :Dict , lowercase_ :Tuple = False , lowercase_ :Union[str, Any] = None ) -> Optional[Any]:
UpperCAmelCase = value
if comment is not None:
UpperCAmelCase = comment
if self.last_value is None:
UpperCAmelCase = UpperCAmelCase = time.time()
UpperCAmelCase = UpperCAmelCase = value
UpperCAmelCase = UpperCAmelCase = None
UpperCAmelCase = self.warmup
UpperCAmelCase = 1
self.update_bar(SCREAMING_SNAKE_CASE_ )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
UpperCAmelCase = time.time()
UpperCAmelCase = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
UpperCAmelCase = self.elapsed_time / (value - self.start_value)
else:
UpperCAmelCase = None
if value >= self.total:
UpperCAmelCase = self.total
UpperCAmelCase = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
UpperCAmelCase = self.average_time_per_item * (self.total - value)
self.update_bar(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = value
UpperCAmelCase = current_time
if self.average_time_per_item is None:
UpperCAmelCase = 1
else:
UpperCAmelCase = max(int(self.update_every / self.average_time_per_item ) , 1 )
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :Optional[int] , lowercase_ :Union[str, Any]=None ) -> Tuple:
UpperCAmelCase = ' ' * (len(str(self.total ) ) - len(str(SCREAMING_SNAKE_CASE_ ) )) + str(SCREAMING_SNAKE_CASE_ )
if self.elapsed_time is None:
UpperCAmelCase = f"""[{spaced_value}/{self.total} : < :"""
elif self.predicted_remaining is None:
UpperCAmelCase = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )}"""
else:
UpperCAmelCase = (
f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <"""
f""" {format_time(self.predicted_remaining )}"""
)
self.label += f""", {1/self.average_time_per_item:.2f} it/s"""
self.label += "]" if self.comment is None or len(self.comment ) == 0 else f""", {self.comment}]"""
self.display()
def UpperCAmelCase__ ( self :List[Any] ) -> List[str]:
UpperCAmelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
UpperCAmelCase = disp.display(disp.HTML(self.html_code ) , display_id=SCREAMING_SNAKE_CASE_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase__ ( self :int ) -> Tuple:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('' ) )
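# A minimal usage sketch (added). It assumes a Jupyter/IPython frontend so that
# IPython.display can actually render the HTML, and it uses the names this file
# itself refers to elsewhere (NotebookProgressBar, update, close):
#
#     bar = NotebookProgressBar(100, prefix="Training")
#     for step in range(100):
#         bar.update(step + 1, comment=f"step {step + 1}")
#     bar.close()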
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self :Tuple , lowercase_ :Any , lowercase_ :List[str]=None ) -> Dict:
super().__init__(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = None if column_names is None else [column_names]
UpperCAmelCase = None
def UpperCAmelCase__ ( self :Optional[Any] ) -> Dict:
UpperCAmelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
UpperCAmelCase = disp.display(disp.HTML(self.html_code ) , display_id=SCREAMING_SNAKE_CASE_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase__ ( self :Tuple , lowercase_ :int ) -> List[str]:
if self.inner_table is None:
UpperCAmelCase = [list(values.keys() ), list(values.values() )]
else:
UpperCAmelCase = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = columns
self.inner_table.append([values[c] for c in columns] )
def UpperCAmelCase__ ( self :str , lowercase_ :Tuple , lowercase_ :List[Any]=None , lowercase_ :List[Any]=3_00 ) -> Any:
UpperCAmelCase = NotebookProgressBar(SCREAMING_SNAKE_CASE_ , prefix=SCREAMING_SNAKE_CASE_ , parent=self , width=SCREAMING_SNAKE_CASE_ )
return self.child_bar
def UpperCAmelCase__ ( self :Dict ) -> str:
UpperCAmelCase = None
self.display()
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self :Optional[int] ) -> List[str]:
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = False
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Dict , lowercase_ :Union[str, Any] , lowercase_ :Optional[int] , **lowercase_ :Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = [self.first_column] + ['Training Loss']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('Validation Loss' )
UpperCAmelCase = NotebookTrainingTracker(state.max_steps , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase__ ( self :Any , lowercase_ :List[str] , lowercase_ :List[Any] , lowercase_ :str , **lowercase_ :Tuple ) -> Any:
UpperCAmelCase = int(state.epoch ) if int(state.epoch ) == state.epoch else f"""{state.epoch:.2f}"""
self.training_tracker.update(
state.global_step + 1 , comment=f"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
UpperCAmelCase = False
def UpperCAmelCase__ ( self :Any , lowercase_ :Optional[Any] , lowercase_ :int , lowercase_ :List[str] , lowercase_ :int=None , **lowercase_ :Any ) -> List[Any]:
if not has_length(SCREAMING_SNAKE_CASE_ ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
UpperCAmelCase = self.training_tracker.add_child(len(SCREAMING_SNAKE_CASE_ ) )
else:
UpperCAmelCase = NotebookProgressBar(len(SCREAMING_SNAKE_CASE_ ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :int , lowercase_ :List[Any] , lowercase_ :List[Any] , **lowercase_ :Any ) -> int:
if self.prediction_bar is not None:
self.prediction_bar.close()
UpperCAmelCase = None
def UpperCAmelCase__ ( self :str , lowercase_ :List[Any] , lowercase_ :Tuple , lowercase_ :List[Any] , lowercase_ :Optional[int]=None , **lowercase_ :str ) -> Union[str, Any]:
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
UpperCAmelCase = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
UpperCAmelCase = state.global_step
self.training_tracker.write_line(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Dict , lowercase_ :Any , lowercase_ :Any , lowercase_ :List[Any]=None , **lowercase_ :List[str] ) -> Any:
if self.training_tracker is not None:
UpperCAmelCase = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
for log in reversed(state.log_history ):
if "loss" in log:
UpperCAmelCase = log['loss']
break
if self.first_column == "Epoch":
UpperCAmelCase = int(state.epoch )
else:
UpperCAmelCase = state.global_step
UpperCAmelCase = 'eval'
for k in metrics:
if k.endswith('_loss' ):
UpperCAmelCase = re.sub(R'\_loss$' , '' , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = metrics.pop('total_flos' , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = metrics.pop('epoch' , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = metrics.pop(f"""{metric_key_prefix}_runtime""" , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = metrics.pop(f"""{metric_key_prefix}_samples_per_second""" , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = metrics.pop(f"""{metric_key_prefix}_steps_per_second""" , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""" , SCREAMING_SNAKE_CASE_ )
for k, v in metrics.items():
if k == f"""{metric_key_prefix}_loss""":
UpperCAmelCase = v
else:
UpperCAmelCase = k.split('_' )
UpperCAmelCase = ' '.join([part.capitalize() for part in splits[1:]] )
UpperCAmelCase = v
self.training_tracker.write_line(SCREAMING_SNAKE_CASE_ )
self.training_tracker.remove_child()
UpperCAmelCase = None
# Evaluation takes a long time so we should force the next update.
UpperCAmelCase = True
def UpperCAmelCase__ ( self :List[str] , lowercase_ :List[Any] , lowercase_ :Tuple , lowercase_ :str , **lowercase_ :List[Any] ) -> str:
self.training_tracker.update(
state.global_step , comment=f"""Epoch {int(state.epoch )}/{state.num_train_epochs}""" , force_update=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = None
| 78 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowercase__ : Optional[Any] = logging.getLogger(__name__)
def A_ ( snake_case : Any=2 , snake_case : Union[str, Any]=3 , snake_case : Union[str, Any]=16 , snake_case : int = 10 , snake_case : int = 2 ) -> int:
'''simple docstring'''
def get_dataset(snake_case : Optional[int] ):
__UpperCamelCase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
__UpperCamelCase = get_dataset(snake_case )
__UpperCamelCase = get_dataset(snake_case )
__UpperCamelCase = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 )
__UpperCamelCase = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 )
return (train_dataloader, valid_dataloader)
def A_ ( snake_case : List[str] , snake_case : int , snake_case : List[str] , snake_case : Optional[int] , snake_case : int , snake_case : str=None ) -> Any:
'''simple docstring'''
__UpperCamelCase = []
for epoch in range(snake_case ):
# Train quickly
model.train()
for batch in dataloader:
__UpperCamelCase , __UpperCamelCase = batch
__UpperCamelCase = model(snake_case )
__UpperCamelCase = torch.nn.functional.mse_loss(snake_case , snake_case )
accelerator.backward(snake_case )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self )-> Tuple:
'''simple docstring'''
super().__init__()
__UpperCamelCase = nn.Parameter(torch.randn(1 ) )
__UpperCamelCase = nn.Parameter(torch.randn(1 ) )
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict:
'''simple docstring'''
return x * self.a + self.b
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self )-> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def A__ ( self )-> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
# Train baseline
__UpperCamelCase = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
__UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = Accelerator()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
__UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
# Load everything back in and make sure all states work
accelerator.load_state(SCREAMING_SNAKE_CASE_ )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
__UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item()
__UpperCamelCase = optimizer.state_dict()
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( self )-> Tuple:
'''simple docstring'''
__UpperCamelCase = torch.tensor([1, 2, 3] )
__UpperCamelCase = torch.tensor([2, 3, 4] )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(net.parameters() )
__UpperCamelCase = Accelerator()
with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve:
accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def A__ ( self )-> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__UpperCamelCase = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.9_9 )
__UpperCamelCase , __UpperCamelCase = dummy_dataloaders()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save initial
accelerator.save_state()
__UpperCamelCase = scheduler.state_dict()
train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() )
def A__ ( self )-> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__UpperCamelCase = DummyModel()
__UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 )
# Train baseline
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
            # Save 11 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def A__ ( self )-> Optional[int]:
'''simple docstring'''
__UpperCamelCase = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() )
if __name__ == "__main__":
lowercase__ : Optional[int] = "/tmp/accelerate/state_checkpointing"
lowercase__ : List[Any] = DummyModel()
lowercase__ : Tuple = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowercase__ : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowercase__ , lowercase__ : str = dummy_dataloaders()
lowercase__ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowercase__ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowercase__ , lowercase__ : str = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowercase__ : int = group["params"][0].device
break
assert param_device.type == accelerator.device.type
lowercase__ : Union[str, Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
lowercase__ : Any = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
lowercase__ : List[Any] = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 328 | 0 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # f = g + h: estimated total cost through this cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if 0 <= x2 < len(grid) and 0 <= y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
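# Note on expansion order (added): `cell` is kept as a plain list; sort() +
# reverse() + pop() removes the entry with the smallest f = g + h, so the loop
# expands cells in best-first (A*-style) order without needing a heap.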
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 277 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , **SCREAMING_SNAKE_CASE_ , )-> Optional[int]:
'''simple docstring'''
super().__init__(features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = Sql(
cache_dir=SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , sql=SCREAMING_SNAKE_CASE_ , con=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def A__ ( self )-> Any:
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = None
self.builder.download_and_prepare(
download_config=SCREAMING_SNAKE_CASE_ , download_mode=SCREAMING_SNAKE_CASE_ , verification_mode=SCREAMING_SNAKE_CASE_ , base_path=SCREAMING_SNAKE_CASE_ , )
# Build dataset for splits
__UpperCamelCase = self.builder.as_dataset(
split='''train''' , verification_mode=SCREAMING_SNAKE_CASE_ , in_memory=self.keep_in_memory )
return dataset
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> List[str]:
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(F"num_proc {num_proc} must be an integer > 0." )
__UpperCamelCase = dataset
__UpperCamelCase = name
__UpperCamelCase = con
__UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__UpperCamelCase = num_proc
__UpperCamelCase = to_sql_kwargs
def A__ ( self )-> int:
'''simple docstring'''
__UpperCamelCase = self.to_sql_kwargs.pop('''sql''' , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self.to_sql_kwargs.pop('''con''' , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self.to_sql_kwargs.pop('''index''' , SCREAMING_SNAKE_CASE_ )
__UpperCamelCase = self._write(index=SCREAMING_SNAKE_CASE_ , **self.to_sql_kwargs )
return written
def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict:
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = args
__UpperCamelCase = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
__UpperCamelCase = query_table(
table=self.dataset.data , key=slice(SCREAMING_SNAKE_CASE_ , offset + self.batch_size ) , indices=self.dataset._indices , )
__UpperCamelCase = batch.to_pandas()
__UpperCamelCase = df.to_sql(self.name , self.con , index=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return num_rows or len(SCREAMING_SNAKE_CASE_ )
def A__ ( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> int:
'''simple docstring'''
__UpperCamelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__UpperCamelCase , __UpperCamelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
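# A minimal usage sketch (added; the class and method names below are illustrative,
# on the assumption that this module mirrors the `datasets` SQL reader/writer API
# implemented by the two classes above):
#
#     ds = SqlDatasetReader("SELECT * FROM states", "sqlite:///example.db").read()
#     SqlDatasetWriter(ds, "states_copy", "sqlite:///example.db", num_proc=2).write()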
| 328 | 0 |
def solution(n: int = 400_0000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
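# An equivalent sketch (added; the name below is illustrative): every third
# Fibonacci number is even, and the even terms E(k) satisfy
# E(k) = 4 * E(k - 1) + E(k - 2), so the parity check can be skipped entirely.
def even_fib_sum(n: int = 400_0000) -> int:
    total, a, b = 0, 2, 8  # 2 and 8 are the first two even Fibonacci numbers
    while a <= n:
        total += a
        a, b = b, 4 * b + a  # step the even-term recurrence directly
    return total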
if __name__ == "__main__":
print(F"""{solution() = }""")
| 99 |
def excel_title_to_column(column_title: str) -> int:
    """Given a string that represents an Excel column title, return its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
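# Worked examples (added): "AB" -> (ord("A") - 64) * 26**1 + (ord("B") - 64) * 26**0
# = 26 + 2 = 28, and "ZZ" -> 26 * 26 + 26 = 702.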
if __name__ == "__main__":
from doctest import testmod
testmod()
| 328 | 0 |
from typing import Any
class _a :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : Tuple ):
A_ = data
A_ = None
def __repr__( self : Union[str, Any] ):
return f'''Node({self.data})'''
class _a :
"""simple docstring"""
def __init__( self : Optional[Any] ):
A_ = None
def __iter__( self : List[Any] ):
A_ = self.head
while node:
yield node.data
A_ = node.next
def __len__( self : List[str] ):
return sum(1 for _ in self )
def __repr__( self : str ):
return "->".join([str(SCREAMING_SNAKE_CASE_ ) for item in self] )
def __getitem__( self : Dict , UpperCAmelCase : Dict ):
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ):
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
A_ = self.head
for _ in range(SCREAMING_SNAKE_CASE_ ):
A_ = current.next
A_ = data
def __A ( self : str , UpperCAmelCase : Union[str, Any] ):
self.insert_nth(len(self ) , SCREAMING_SNAKE_CASE_ )
def __A ( self : Dict , UpperCAmelCase : str ):
self.insert_nth(0 , SCREAMING_SNAKE_CASE_ )
def __A ( self : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] ):
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
A_ = Node(SCREAMING_SNAKE_CASE_ )
if self.head is None:
A_ = new_node
elif index == 0:
A_ = self.head # link new_node to head
A_ = new_node
else:
A_ = self.head
for _ in range(index - 1 ):
A_ = temp.next
A_ = temp.next
A_ = new_node
def __A ( self : Dict ): # print every node data
print(self )
def __A ( self : str ):
return self.delete_nth(0 )
def __A ( self : Union[str, Any] ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def __A ( self : Any , UpperCAmelCase : Dict = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
A_ = self.head # default first node
if index == 0:
A_ = self.head.next
else:
A_ = self.head
for _ in range(index - 1 ):
A_ = temp.next
A_ = temp.next
A_ = temp.next.next
return delete_node.data
def __A ( self : Optional[Any] ):
return self.head is None
def __A ( self : Tuple ):
A_ = None
A_ = self.head
while current:
# Store the current node's next node.
A_ = current.next
# Make the current node's next point backwards
A_ = prev
# Make the previous node be the current node
A_ = current
# Make the current node the next node (to progress iteration)
A_ = next_node
# Return prev in order to put the head at the end
A_ = prev
def __snake_case ( ):
"""simple docstring"""
A_ = LinkedList()
assert linked_list.is_empty() is True
assert str(__UpperCamelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__UpperCamelCase ) == i
linked_list.insert_nth(__UpperCamelCase ,i + 1 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 ,12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__UpperCamelCase ) == 9
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True
for i in range(0 ,9 ):
A_ = -i
assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True
linked_list.reverse()
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(-8 ,1 ) )
def __snake_case ( ):
"""simple docstring"""
A_ = [
-9,
100,
Node(7734_5112 ),
"dlrow olleH",
7,
5555,
0,
-192.5_5555,
"Hello, world!",
77.9,
Node(10 ),
None,
None,
12.20,
]
A_ = LinkedList()
for i in test_input:
linked_list.insert_tail(__UpperCamelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
A_ = linked_list.delete_head()
assert result == -9
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
A_ = linked_list.delete_tail()
assert result == 12.2
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
A_ = linked_list.delete_nth(10 )
assert result is None
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__UpperCamelCase )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__UpperCamelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def __snake_case ( ):
"""simple docstring"""
from doctest import testmod
testmod()
A_ = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(__UpperCamelCase )
print("\nReading/changing Node data using indexing:" )
print(f'''Element at Position 1: {linked_list[1]}''' )
A_ = input("Enter New Value: " ).strip()
print("New list:" )
print(__UpperCamelCase )
print(f'''length of linked_list is : {len(__UpperCamelCase )}''' )
if __name__ == "__main__":
main() | 312 |
def A_ ( snake_case : int ) -> None:
'''simple docstring'''
__UpperCamelCase = generate_pascal_triangle(snake_case )
for row_idx in range(snake_case ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=''' ''' )
else:
print(triangle[row_idx][col_idx] , end='''''' )
print()
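# Expected rendering (added), assuming the printing helper above is invoked with
# num_rows=3:
#   1
#  1 1
# 1 2 1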
def A_ ( snake_case : int ) -> list[list[int]]:
'''simple docstring'''
if not isinstance(snake_case , snake_case ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
__UpperCamelCase = []
for current_row_idx in range(snake_case ):
__UpperCamelCase = populate_current_row(snake_case , snake_case )
triangle.append(snake_case )
return triangle
def A_ ( snake_case : list[list[int]] , snake_case : int ) -> list[int]:
'''simple docstring'''
__UpperCamelCase = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
__UpperCamelCase , __UpperCamelCase = 1, 1
for current_col_idx in range(1 , snake_case ):
calculate_current_element(
snake_case , snake_case , snake_case , snake_case )
return current_row
def A_ ( snake_case : list[list[int]] , snake_case : list[int] , snake_case : int , snake_case : int , ) -> None:
'''simple docstring'''
__UpperCamelCase = triangle[current_row_idx - 1][current_col_idx - 1]
__UpperCamelCase = triangle[current_row_idx - 1][current_col_idx]
__UpperCamelCase = above_to_left_elt + above_to_right_elt
def A_ ( snake_case : int ) -> list[list[int]]:
'''simple docstring'''
if not isinstance(snake_case , snake_case ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
__UpperCamelCase = [[1]]
for row_index in range(1 , snake_case ):
__UpperCamelCase = [0] + result[-1] + [0]
__UpperCamelCase = row_index + 1
# Calculate the number of distinct elements in a row
__UpperCamelCase = sum(divmod(snake_case , 2 ) )
__UpperCamelCase = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
__UpperCamelCase = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
__UpperCamelCase = row_first_half + row_second_half
result.append(snake_case )
return result
def A_ ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(snake_case : Callable , snake_case : int ) -> None:
__UpperCamelCase = f"{func.__name__}({value})"
__UpperCamelCase = timeit(f"__main__.{call}" , setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(snake_case , snake_case )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 328 | 0 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
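# Note (added): PolynomialFeatures(degree=4) expands the single feature x into the
# columns [1, x, x**2, x**3, x**4], so the "polynomial regression" below is an
# ordinary least-squares LinearRegression fitted on those expanded columns.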
def viz_polynomial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Polynomial Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()
    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 65 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
    args = parser.parse_args()

    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 328 | 0 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
lowercase = "Usage of script: script_name <size_of_canvas:int>"
lowercase = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size):
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas):
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas):
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.

    return_canvas = current_canvas.tolist()
    return return_canvas
def __judge_point(pt, neighbours):
    dead = 0
    alive = 0
    # count the dead and alive neighbours.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # apply the rules of the game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state
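# The branches above encode Conway's standard B3/S23 rules: a live cell survives
# with exactly 2 or 3 live neighbours (and dies of under- or over-population
# otherwise), while a dead cell becomes alive with exactly 3 live neighbours.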
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 178 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
lowercase__ : Union[str, Any] = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
lowercase__ : Optional[Any] = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
lowercase__ : Any = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
lowercase__ : Optional[int] = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
lowercase__ : Optional[Any] = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores and the per-candidate execution results."""
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    # `check_correctness` executes one candidate program against its test
                    # case; it is expected to be imported at the top of this module.
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns the estimates in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
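# Quick sanity check: with 10 samples per task of which 2 (resp. 10) pass, the
# unbiased pass@1 estimate is 1 - (8/9)(9/10) = 0.2 (resp. 1.0). Assumes numpy is
# imported as `np` at the top of this module, as the functions above require.
if __name__ == "__main__":
    print(estimate_pass_at_k([10, 10], [2, 10], 1))  # -> [0.2 1. ]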
| 328 | 0 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3], ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify that an invalid out_features raises a ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
    def test_multi_gpu_data_parallel_forward(self):
"""simple docstring"""
pass
    def test_config(self):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
"""simple docstring"""
return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
    def test_attention_outputs(self):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), msg=(
                            'Tuple and dict output are not equal. Difference:'
                            f' {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:'
                            f' {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has'
                            f' `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}.'
                        ), )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    # Overriding as the returned hidden states are tuples of tensors instead of a single tensor
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
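# Sanity note on the asserted channel counts: with embed_dim=16 the backbone width
# doubles per stage (16 * 2**i for stage i), which yields exactly the [16, 32, 64]
# list checked in `create_and_check_backbone` above:
# >>> [16 * 2**i for i in range(3)]
# [16, 32, 64]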
| 329 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
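# Worked example: blanks in `path` are percent-encoded by `quote`, so
# hf_hub_url("org-name/dataset-name", "filename with blanks.csv", "v2") returns
# "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"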
| 329 | 1 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
'num_inference_steps',
'num_waveforms_per_prompt',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
    def get_dummy_components(self):
        # Note: the boolean flag values below were lost in the dump and are
        # reconstructed from the upstream AudioLDM fast test.
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechT5HifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033])
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = AudioLDMPipeline(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = 3 * [inputs['prompt']]
# forward
_UpperCAmelCase = audioldm_pipe(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = output.audios[0]
_UpperCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = 3 * [inputs.pop('prompt' )]
_UpperCAmelCase = audioldm_pipe.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
_UpperCAmelCase = text_inputs['input_ids'].to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = audioldm_pipe.text_encoder(
_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_UpperCAmelCase = F.normalize(_SCREAMING_SNAKE_CASE , dim=-1 )
_UpperCAmelCase = prompt_embeds
# forward
_UpperCAmelCase = audioldm_pipe(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = AudioLDMPipeline(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = 3 * ['this is a negative prompt']
_UpperCAmelCase = negative_prompt
_UpperCAmelCase = 3 * [inputs['prompt']]
# forward
_UpperCAmelCase = audioldm_pipe(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = output.audios[0]
_UpperCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = 3 * [inputs.pop('prompt' )]
_UpperCAmelCase = []
for p in [prompt, negative_prompt]:
_UpperCAmelCase = audioldm_pipe.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
_UpperCAmelCase = text_inputs['input_ids'].to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = audioldm_pipe.text_encoder(
_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_UpperCAmelCase = F.normalize(_SCREAMING_SNAKE_CASE , dim=-1 )
embeds.append(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase = embeds
# forward
_UpperCAmelCase = audioldm_pipe(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = AudioLDMPipeline(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = 'egg cracking'
_UpperCAmelCase = audioldm_pipe(**_SCREAMING_SNAKE_CASE , negative_prompt=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_SCREAMING_SNAKE_CASE ) == 256
_UpperCAmelCase = audio[:10]
_UpperCAmelCase = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = AudioLDMPipeline(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
_UpperCAmelCase = audioldm_pipe(_SCREAMING_SNAKE_CASE , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_UpperCAmelCase = 2
_UpperCAmelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
_UpperCAmelCase = 2
_UpperCAmelCase = audioldm_pipe(_SCREAMING_SNAKE_CASE , num_inference_steps=2 , num_waveforms_per_prompt=_SCREAMING_SNAKE_CASE ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
_UpperCAmelCase = 2
_UpperCAmelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_SCREAMING_SNAKE_CASE ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = AudioLDMPipeline(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = audioldm_pipe.vocoder.config.sampling_rate
_UpperCAmelCase = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = audioldm_pipe(audio_length_in_s=0.016 , **_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_SCREAMING_SNAKE_CASE ) / vocoder_sampling_rate == 0.016
_UpperCAmelCase = audioldm_pipe(audio_length_in_s=0.032 , **_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_SCREAMING_SNAKE_CASE ) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = ["hey"]
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)
        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE )
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="cpu" , _SCREAMING_SNAKE_CASE=torch.floataa , _SCREAMING_SNAKE_CASE=0 ) -> int:
"""simple docstring"""
_UpperCAmelCase = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = np.random.RandomState(_SCREAMING_SNAKE_CASE ).standard_normal((1, 8, 128, 16) )
_UpperCAmelCase = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
_UpperCAmelCase = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.get_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = 25
_UpperCAmelCase = audioldm_pipe(**_SCREAMING_SNAKE_CASE ).audios[0]
assert audio.ndim == 1
assert len(_SCREAMING_SNAKE_CASE ) == 81920
_UpperCAmelCase = audio[77230:77240]
_UpperCAmelCase = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
_UpperCAmelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
_UpperCAmelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
_UpperCAmelCase = audioldm_pipe.to(_SCREAMING_SNAKE_CASE )
audioldm_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.get_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = audioldm_pipe(**_SCREAMING_SNAKE_CASE ).audios[0]
assert audio.ndim == 1
assert len(_SCREAMING_SNAKE_CASE ) == 81920
_UpperCAmelCase = audio[27780:27790]
_UpperCAmelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
_UpperCAmelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 329 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg: str, hint: str = None) -> None:
    """Checks that the installed version of `pkg` satisfies the pin recorded in `deps`."""
    require_version(deps[pkg], hint)
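# Minimal usage sketch: check a single runtime dependency on demand, e.g.
# dep_version_check("tqdm") raises if the installed tqdm violates the pin in `deps`.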
| 329 | 1 |
def count_divisors(n: int) -> int:
    """Counts the divisors of `n` from its prime factorization."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Returns the first triangle number with more than five hundred divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
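# Worked example: 28 = 2**2 * 7, so count_divisors(28) == (2 + 1) * (1 + 1) == 6,
# and 28 (divisors 1, 2, 4, 7, 14, 28) is the first triangle number with more than
# five divisors -- the same logic `solution()` applies with a threshold of 500.
assert count_divisors(28) == 6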
| 329 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns every vertex reachable from `start`."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
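# Expected output: starting from "A" the DFS above reaches every vertex, so the call
# prints {'A', 'B', 'C', 'D', 'E', 'F', 'G'} (set ordering may vary between runs).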
| 329 | 1 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowerCAmelCase__ :Optional[int] = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.")

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"
class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)
class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)
class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    """Returns the name of the first installed hyperparameter search backend."""
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default.")
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
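# Minimal dispatch sketch (assumes a configured `trainer` object is in scope):
#
#   backend_name = default_hp_search_backend()
#   backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
#   backend.ensure_available()
#   best_run = backend.run(trainer, n_trials=10, direction="minimize")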
| 329 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )]
_UpperCAmelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 4
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3
assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1
_UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_SCREAMING_SNAKE_CASE ) == num_samples
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = scheduler.create_state()
_UpperCAmelCase = scheduler_state
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None, )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice_ = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', revision='bf16', dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True, )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice_).max() < 1e-2
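# Data-layout note for the pmap helpers used above: `shard` reshapes the tokenized
# prompts from (num_devices * batch, ...) to (num_devices, batch, ...) and
# `replicate` copies the params onto every device, which is why the jitted pipeline
# returns images with a leading per-device axis, e.g. (num_samples, 1, 512, 512, 3).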
| 329 | 1 |
import string
def decrypt(message: str) -> None:
    """Brute-forces all 26 Caesar shifts and prints every candidate plaintext."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
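# Worked example: "CAESAR" shifted forward by 12 encrypts to "OMQEMD", so calling
# decrypt("OMQEMD") prints "CAESAR" on the Key #12 line among the 26 candidates.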
| 329 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase__ :int = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation.")
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # A git model with `prompt=None` puts `input_ids = None` in `preprocess`; batching
        # then yields a list of `None`s, which must be collapsed back to a single `None`.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = []
for output_ids in model_outputs:
_UpperCAmelCase = {
'generated_text': self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , )
}
records.append(_SCREAMING_SNAKE_CASE )
return records
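For orientation, a minimal, hedged usage sketch of the pipeline above driven through the `pipeline` factory. The checkpoint name and image URL are illustrative assumptions, not values taken from this file:

from transformers import pipeline

captioner = pipeline('image-to-text', model='ydshieh/vit-gpt2-coco-en')  # illustrative checkpoint
outputs = captioner('https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png')
print(outputs)  # e.g. [{'generated_text': 'two birds are standing next to each other'}]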
| 329 | 1 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects) -> list:
    """Drop references to the given objects and empty the accelerator cache."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
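A short, hedged sketch of how `release_memory` is meant to be called; the model and optimizer are illustrative placeholders:

import torch

model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = release_memory(model, optimizer)  # drops the references, empties the cache
assert model is None and optimizer is None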
def should_reduce_batch_size(exception: Exception) -> bool:
    """Return True if `exception` looks like a recoverable out-of-memory error."""
    _statements = [
        'CUDA out of memory.',  # CUDA OOM
        'cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.',  # CUDNN SNAFU
        'DefaultCPUAllocator: can\'t allocate memory',  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries `function` with a halved batch size on OOM errors."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: the decorator injects the batch size itself
        if len(params) < (len(args) + 1):
            arg_str = ', '.join([f'''{arg}={value}''' for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f'''Batch size was passed into `{function.__name__}` as the first argument when called.'''
                f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''')
        while True:
            if batch_size == 0:
                raise RuntimeError('No executable batch size found, reached zero.')
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
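A hedged usage sketch for the decorator above. The fake OOM rule stands in for a real out-of-memory failure so the retry loop is visible; everything here is illustrative:

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # The decorator injects `batch_size` as the first argument; callers pass only the rest.
    if batch_size > 32:  # stand-in for a real CUDA OOM at large batch sizes
        raise RuntimeError('CUDA out of memory.')
    return batch_size

assert train() == 32  # 128 -> 64 -> 32 under the fake OOM rule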
| 329 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """Warn about (and optionally pop) deprecated arguments or attributes."""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                f''' version {__version__} is >= {version_name}''')
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''')
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
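A hedged usage sketch for `deprecate`: pop a deprecated keyword, emit the warning, and fall back to the new argument. The argument name and the far-future removal version are illustrative:

def load_checkpoint(path, device=None, **kwargs):
    torch_device = deprecate('torch_device', '999.0.0', 'Use `device` instead.', take_from=kwargs)
    return device if device is not None else torch_device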
| 329 | 1 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below `max_number` using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p**q * q**p <= base**degree (Project Euler 800)."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(f'''{solution() = }''')
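A hedged sanity check with hand-checkable bounds: the smallest candidate pair gives 2**3 * 3**2 = 72, which already exceeds 5**1, so no hybrid integers are counted:

assert solution(5, 1) == 0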
| 329 |
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_balls_drawn: int = 20) -> str:
    """Expected number of distinct colours when drawing balls (Project Euler 493)."""
    total = math.comb(NUM_BALLS, num_balls_drawn)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_balls_drawn)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'''{result:.9f}'''


if __name__ == "__main__":
    print(solution(20))
| 329 | 1 |
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of `n` (Project Euler 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
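Hedged sanity checks; 13195 is the worked example from the Project Euler problem statement (its largest prime factor is 29):

assert solution(13195) == 29
assert solution(17) == 17  # a prime is its own largest prime factor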
| 329 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_megatron_bert'''] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 329 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mt5'''] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mt5'''] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_mt5'''] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()['''__file__'''],
        _import_structure,
        extra_objects={'''MT5Tokenizer''': MT5Tokenizer, '''MT5TokenizerFast''': MT5TokenizerFast},
        module_spec=__spec__,
    )
| 329 |
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    """Convert a T5X checkpoint into a Flax transformers checkpoint."""
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    split_mlp_wi = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']
if config.model_type == "t5":
_UpperCAmelCase = 'SelfAttention'
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_UpperCAmelCase = 'LocalSelfAttention'
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = 'TransientGlobalSelfAttention'
else:
raise ValueError(
'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'
' attribute with a value from [\'local\', \'transient-global].' )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = f'''layers_{str(layer_index)}'''
# Self-Attention
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            tax_global_layer_norm = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
if split_mlp_wi:
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
else:
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
        flax_model_encoder_layer_block = flax_model.params['encoder']['block'][str(layer_index)]['layer']
_UpperCAmelCase = tax_attention_key
_UpperCAmelCase = tax_attention_out
_UpperCAmelCase = tax_attention_query
_UpperCAmelCase = tax_attention_value
_UpperCAmelCase = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_global_layer_norm
if split_mlp_wi:
_UpperCAmelCase = tax_mlp_wi_a
_UpperCAmelCase = tax_mlp_wi_a
else:
_UpperCAmelCase = tax_mlp_wi
_UpperCAmelCase = tax_mlp_wo
_UpperCAmelCase = tax_mlp_layer_norm
_UpperCAmelCase = flax_model_encoder_layer_block
# Only for layer 0:
_UpperCAmelCase = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
_UpperCAmelCase = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_UpperCAmelCase = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
_UpperCAmelCase = tax_encoder_global_rel_embedding
# Assigning
_UpperCAmelCase = tax_model['target']['encoder']['encoder_norm']['scale']
_UpperCAmelCase = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f'''layers_{str(layer_index)}'''
# Self-Attention
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
'scale'
]
# Encoder-Decoder-Attention
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
_UpperCAmelCase = tax_enc_dec_attention_module['key']['kernel']
_UpperCAmelCase = tax_enc_dec_attention_module['out']['kernel']
_UpperCAmelCase = tax_enc_dec_attention_module['query']['kernel']
_UpperCAmelCase = tax_enc_dec_attention_module['value']['kernel']
# Layer Normalization
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
# MLP
if split_mlp_wi:
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
else:
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
_UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
# Layer Normalization
        tax_mlp_layer_norm = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
# Assigning
        flax_model_decoder_layer_block = flax_model.params['decoder']['block'][str(layer_index)]['layer']
_UpperCAmelCase = tax_attention_key
_UpperCAmelCase = tax_attention_out
_UpperCAmelCase = tax_attention_query
_UpperCAmelCase = tax_attention_value
_UpperCAmelCase = tax_pre_attention_layer_norm
_UpperCAmelCase = tax_enc_dec_attention_key
_UpperCAmelCase = tax_enc_dec_attention_out
_UpperCAmelCase = tax_enc_dec_attention_query
_UpperCAmelCase = tax_enc_dec_attention_value
_UpperCAmelCase = tax_cross_layer_norm
if split_mlp_wi:
_UpperCAmelCase = tax_mlp_wi_a
_UpperCAmelCase = tax_mlp_wi_a
else:
_UpperCAmelCase = tax_mlp_wi
_UpperCAmelCase = tax_mlp_wo
        _UpperCAmelCase = tax_mlp_layer_norm
_UpperCAmelCase = flax_model_decoder_layer_block
# Decoder Normalization
    tax_decoder_norm = tax_model['target']['decoder']['decoder_norm']['scale']
    _UpperCAmelCase = tax_decoder_norm
# Only for layer 0:
_UpperCAmelCase = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
_UpperCAmelCase = tax_decoder_rel_embedding
# Token Embeddings
    tax_token_embeddings = tax_model['target']['token_embedder']['embedding']
    _UpperCAmelCase = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_UpperCAmelCase = tax_model['target']['decoder']['logits_dense']['kernel']
flax_model.save_pretrained(a__ )
print('T5X Model was sucessfully converted!' )
if __name__ == "__main__":
lowerCAmelCase__ :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
lowerCAmelCase__ :List[str] = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 329 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load CSV files into tokenized TF datasets for sequence classification."""
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('csv', data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='max_length'), batched=True, )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding='max_length', ), batched=True, )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={'help': 'Which column contains the label'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The path of the training file'})
    dev_file: Optional[str] = field(default=None, metadata={'help': 'The path of the development file'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'The path of the test file'})
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
def main():
    """Fine-tune and evaluate a TF sequence-classification model on CSV data."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            ' --overwrite_output_dir to overcome.')
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
    logger.info(
        f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '''
        f'''16-bits training: {training_args.fp16}''')
    logger.info(f'''Training/evaluation parameters {training_args}''')
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key, value in result.items():
                logger.info(f''' {key} = {value}''')
                writer.write(f'''{key} = {value}\n''')
        results.update(result)
    return results
if __name__ == "__main__":
main()
| 329 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}


class CTRLConfig(PretrainedConfig):
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16, resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02, use_cache=True, **kwargs, ) -> None:
        """Store the CTRL architecture hyperparameters."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
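A minimal usage sketch for the config above, relying only on the defaults and the `attribute_map` declared in this file:

config = CTRLConfig()
assert config.model_type == 'ctrl'
assert config.hidden_size == config.n_embd == 1280  # `hidden_size` is remapped to `n_embd`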
| 329 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer, ) -> None:
        """Register the component modules of the image-to-3D pipeline."""
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer, )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Sample (or validate) the initial latents and scale them for the scheduler."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0) -> None:
        """Offload the submodels to CPU, moving them to GPU only when used."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f'''cuda:{gpu_id}''')
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        """Return the device the pipeline actually executes on (hook-aware)."""
        if self.device != torch.device('meta') or not hasattr(self.image_encoder, '_hf_hook'):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, '_hf_hook')
                and hasattr(module._hf_hook, 'execution_device')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance, ):
        """Encode the conditioning image(s) into CLIP embeddings."""
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors='pt').pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)['last_hidden_state']
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image, num_images_per_prompt=1, num_inference_steps=25, generator=None, latents=None, guidance_scale=4.0, frame_size=64, output_type='pil', return_dict=True, ):
        """Run the prior diffusion loop and render the resulting 3D views."""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}''')
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler, )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for t in self.progress_bar(timesteps):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds, ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents, ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for latent in latents:
            rendered_image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128, )
            images.append(rendered_image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''')
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(img) for img in images]
        # Offload last model to CPU
        if hasattr(self, 'final_offload_hook') and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
| 329 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ) -> None:
        """Hold the configuration shared by the image-processing tests."""
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self) -> dict:
        """Return the kwargs used to build the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height/width the processor is expected to resize to."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self) -> None:
        """simple docstring"""
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self) -> dict:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self) -> None:
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self) -> None:
        """simple docstring"""
        pass

    def test_call_pil(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self) -> None:
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self) -> None:
        """simple docstring"""
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'image_id': 39769, 'annotations': target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self) -> None:
        """simple docstring"""
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = DeformableDetrImageProcessor(format='coco_panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
| 329 | 1 |
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """Return the Liouville function of `number`: -1 for an odd count of prime factors (with multiplicity), 1 otherwise."""
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 1:
        raise ValueError('Input must be a positive integer')
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
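Hedged sanity checks for the function above (it computes the Liouville function, assuming `prime_factors` returns factors with multiplicity):

assert liouville_lambda(10) == 1   # 10 = 2 * 5 -> even count of prime factors
assert liouville_lambda(11) == -1  # a prime has exactly one prime factor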
| 329 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
    def test_1b_lyrics_tokenizer(self) -> None:
        """simple docstring"""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self) -> None:
        """simple docstring"""
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 329 | 1 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=1000 , ) -> str:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs( self ) -> Dict:
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal: each box must satisfy x0 <= x1 and y0 <= y1
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config( self ) -> Optional[Any]:
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels ) -> Dict:
        """simple docstring"""
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels ) -> Union[str, Any]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels ) -> int:
        """simple docstring"""
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ) -> str:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class LiltModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_a : int = False
_a : Any = False
    def is_pipeline_test_to_skip( self , pipeline_test_case_name , config_class , model_architecture , tokenizer_name , processor_name ) -> List[str]:
"""simple docstring"""
return True
    def setUp( self ) -> Union[str, Any]:
        """simple docstring"""
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
    def test_config( self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model( self ) -> Union[str, Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ) -> Optional[Any]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_token_classification( self ) -> str:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering( self ) -> List[str]:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ) -> int:
        """simple docstring"""
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class LiltModelIntegrationTest( unittest.TestCase ):
    def test_inference_no_head( self ) -> Dict:
        """simple docstring"""
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )
        self.assertTrue(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1e-3 ) )
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ :Optional[int] = logging.getLogger(__name__)
def parse_args( ) -> argparse.Namespace:
'''simple docstring'''
    parser = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=a__ , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=a__ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=a__ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=a__ , default=1_0_0_0 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=a__ , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=a__ , type=a__ , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=a__ , default=5_1_2 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=a__ , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
    args = parser.parse_args()
return args
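
# A hypothetical invocation of this script (the script filename is illustrative;
# the flag values below are simply the defaults defined above):
#   python prepare_tfrecord_shards.py \
#     --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#     --split train --shard_size 1000 --max_length 512 --output_dir tf-tpu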
def tokenize_function(tokenizer: Union[str, Any] ) -> List[Any]:
    '''simple docstring'''
    def fn(examples: str ):
        return tokenizer(examples['text'] )
    return fn
def get_serialized_examples(tokenized_data: List[str] ) -> Any:
'''simple docstring'''
    records = []
for i in range(len(tokenized_data['input_ids'] ) ):
        features = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i] ) ),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        serialized = example.SerializeToString()
        records.append(serialized )
return records
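
# Minimal read-back sketch (not part of the original script): parse one record
# produced by `get_serialized_examples`. The feature spec mirrors the writer
# above; `tf.int64` is the dtype backing integer lists in TFRecords.
def parse_serialized_example(serialized_example ):
    feature_spec = {
        'input_ids': tf.io.VarLenFeature(tf.int64 ),
        'attention_mask': tf.io.VarLenFeature(tf.int64 ),
    }
    parsed = tf.io.parse_single_example(serialized_example , feature_spec )
    return {k: tf.sparse.to_dense(v ) for k, v in parsed.items()}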
def main(args ) -> int:
    '''simple docstring'''
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F'''Limiting the dataset to {args.limit} entries.''' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples: Optional[int] ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
return result
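    # For intuition (hypothetical numbers): with max_length=4 and
    # {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8]]}, concatenation yields
    # [1, 2, 3, 4, 5, 6, 7, 8], total_length is truncated to 8, and the
    # chunks become [[1, 2, 3, 4], [5, 6, 7, 8]].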
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1_0_0_0 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['input_ids'] )
        filename = os.path.join(split_dir , F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print('Wrote file {} containing {} records'.format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F'''split-{args.split}-records-count.txt''' , 'w' ) as f:
        print(F'''Total {args.split} records: {total_records}''' , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__( self , row , column , default_value = 0 ) -> None:
        """simple docstring"""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column )] for r in range(row )]
    def __str__( self ) -> str:
        """simple docstring"""
        s = f'''Matrix consists of {self.row} rows and {self.column} columns\n'''
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length , len(str(obj ) ) )
        string_format_identifier = f'''%{max_element_length}s'''
        # Make string and return
        def single_line(row_vector ) -> str:
            nonlocal string_format_identifier
            line = '['
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
            line += "]"
            return line
        s += "\n".join(single_line(row_vector ) for row_vector in self.array )
        return s
def __repr__( self ) -> str:
"""simple docstring"""
return str(self )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
if not (isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and len(_SCREAMING_SNAKE_CASE ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
    def __getitem__( self , loc ) -> Any:
        """simple docstring"""
        assert self.validate_indicies(loc )
        return self.array[loc[0]][loc[1]]
    def __setitem__( self , loc , value ) -> None:
        """simple docstring"""
        assert self.validate_indicies(loc )
        self.array[loc[0]][loc[1]] = value
    def __add__( self , another ) -> Matrix:
        """simple docstring"""
        assert isinstance(another , Matrix )
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = self[r, c] + another[r, c]
return result
    def __neg__( self ) -> Matrix:
        """simple docstring"""
        result = Matrix(self.row , self.column )
        for r in range(self.row ):
            for c in range(self.column ):
                result[r, c] = -self[r, c]
return result
    def __sub__( self , another ) -> Matrix:
        """simple docstring"""
        return self + (-another)
    def __mul__( self , another ) -> Matrix:
        """simple docstring"""
        if isinstance(another , (int, float) ):  # Scalar multiplication
            result = Matrix(self.row , self.column )
            for r in range(self.row ):
                for c in range(self.column ):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another , Matrix ):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row , another.column )
            for r in range(self.row ):
                for c in range(another.column ):
                    for i in range(self.column ):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'''Unsupported type given for another ({type(another )})'''
            raise TypeError(msg )
    def transpose( self ) -> Matrix:
        """simple docstring"""
        result = Matrix(self.column , self.row )
        for r in range(self.row ):
            for c in range(self.column ):
                result[c, r] = self[r, c]
return result
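    # Sherman-Morrison: with `self` holding A^(-1), the inverse of the rank-1
    # update A + u v^T is A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u),
    # which is exactly what the method below computes.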
    def sherman_morrison( self , u , v ) -> Any:
        """simple docstring"""
        assert isinstance(u , Matrix ) and isinstance(v , Matrix )
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1( ) -> None:
        '''simple docstring'''
        ainv = Matrix(3 , 3 , 0 )
        for i in range(3 ):
            ainv[i, i] = 1
        print(F'''a^(-1) is {ainv}''' )
        # u, v
        u = Matrix(3 , 1 , 0 )
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3 , 1 , 0 )
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(F'''u is {u}''' )
        print(F'''v is {v}''' )
        print(F'''uv^T is {u * v.transpose()}''' )
        # Sherman Morrison
        print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}''' )

    def test2( ) -> None:
        '''simple docstring'''
        import doctest
        doctest.testmod()
    test2()
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler , num_steps=1_0 ) -> Any:
    '''simple docstring'''
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler , num_steps=1_0 ) -> List[Any]:
    '''simple docstring'''
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , 'schedule.bin' )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class OptimizationTest( unittest.TestCase ):
    def assertListAlmostEqual( self , list1 , list2 , tol ) -> str:
        """simple docstring"""
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_adam_w( self ) -> List[Any]:
"""simple docstring"""
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
    def test_adafactor( self ) -> Dict:
"""simple docstring"""
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1000 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class ScheduleInitTest( unittest.TestCase ):
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ) -> str:
        """simple docstring"""
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
    def test_schedulers( self ) -> Dict:
"""simple docstring"""
        common_kwargs = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler ) # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=f'''failed for {scheduler_func} in save and reload''' )
class LambdaScheduleWrapper:
    def __init__( self , fn ) -> str:
        """simple docstring"""
        self.fn = fn
    def __call__( self , *args , **kwargs ) -> str:
        """simple docstring"""
        return self.fn(*args , **kwargs )
    @classmethod
    def wrap_scheduler( cls , scheduler ) -> List[str]:
        """simple docstring"""
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
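        # Wrapping each lr lambda in a module-level callable object is what
        # makes the LambdaLR schedule picklable for the save/reload test above.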
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase__ :Union[str, Any] = TypeVar('''T''')
class Node( Generic[T] ):
    def __init__( self , data ) -> List[Any]:
        """simple docstring"""
        self.data = data
        self.next = None
def __str__( self ) -> str:
"""simple docstring"""
return f'''{self.data}'''
class Stack( Generic[T] ):
def __init__( self ) -> None:
"""simple docstring"""
        self.top = None
def __iter__( self ) -> Iterator[T]:
"""simple docstring"""
        node = self.top
        while node:
            yield node.data
            node = node.next
def __str__( self ) -> str:
"""simple docstring"""
return "->".join([str(_SCREAMING_SNAKE_CASE ) for item in self] )
def __len__( self ) -> int:
"""simple docstring"""
return len(tuple(iter(self ) ) )
    def is_empty( self ) -> bool:
"""simple docstring"""
return self.top is None
    def push( self , item ) -> None:
"""simple docstring"""
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self ) -> T:
"""simple docstring"""
if self.is_empty():
raise IndexError('pop from empty stack' )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
return pop_node.data
    def peek( self ) -> T:
"""simple docstring"""
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
    def clear( self ) -> None:
"""simple docstring"""
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ :Any = logging.get_logger(__name__)
def replace_key_with_offset(key , offset , original_name , new_name ) -> Union[str, Any]:
    '''simple docstring'''
    to_find = original_name.split('.' )[0]
    key_list = key.split('.' )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' )
    return key
def rename_keys(state_dict ) -> OrderedDict:
    '''simple docstring'''
    new_state_dict = OrderedDict()
    patch_emb_offset, total_embed_found = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network' ):
            key = key.replace('network' , 'poolformer.encoder' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj' )]
            key = key.replace(to_replace , F'''patch_embeddings.{total_embed_found}.''' )
            key = key.replace('proj' , 'projection' )
            if key.endswith('bias' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'mlp.fc1' , 'output.conv1' )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'mlp.fc2' , 'output.conv2' )
        if "norm1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'norm1' , 'before_norm' )
        if "norm2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'norm2' , 'after_norm' )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'layer_scale_1' , 'layer_scale_1' )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'layer_scale_2' , 'layer_scale_2' )
        if "head" in key:
            key = key.replace('head' , 'classifier' )
        new_state_dict[key] = value
return new_state_dict
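# Illustration (hypothetical checkpoint key, assuming no patch-embedding offset
# has accumulated yet): "network.0.0.mlp.fc1.weight" is first prefixed to
# "poolformer.encoder.0.0.mlp.fc1.weight" and then rewritten to
# "poolformer.encoder.block.0.0.output.conv1.weight".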
def prepare_img( ) -> Tuple:
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path ) -> Dict:
    '''simple docstring'''
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    config.num_labels = 1_0_0_0
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 1_0_0_0)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 1_2, 4]
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 1_8, 6]
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 1_8, 6]
        config.hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 2_4, 8]
        config.hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(F'''Size {size} not supported''' )
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='pt' ).pixel_values
    logger.info(F'''Converting model {model_name}...''' )
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device('cpu' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
    elif size == "s24":
        expected_slice = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
    elif size == "m36":
        expected_slice = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
    elif size == "m48":
        expected_slice = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
    else:
        raise ValueError(F'''Size {size} not supported''' )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1e-2 )
    # finally, save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase__ :str = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
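    # Example invocation (hypothetical paths):
    #   python convert_poolformer_checkpoint.py --model_name poolformer_s12 \
    #     --checkpoint_path poolformer_s12.pth.tar --pytorch_dump_folder_path ./dump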
lowerCAmelCase__ :Dict = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None ) -> int:
    '''simple docstring'''
    if nums is None or not nums:
        raise ValueError('Input sequence should not be empty' )
    ans = nums[0]
    for i in range(1 , len(nums ) ):
        num = nums[i]
        ans = max(ans , ans + num , num )
return ans
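# Worked example: for [1, -2, 3, 4] the running maxima are 1, 1, 4, 8, so the
# best subsequence sum is 8 (taking 1, 3 and 4).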
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input('''Enter number of elements : ''').strip())
    array = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset( Dataset ):
    def __init__( self , dataset , process , params ) -> Any:
        """simple docstring"""
        self.dataset = dataset
        self.process = process
        self.params = params
def __len__( self ) -> Union[str, Any]:
"""simple docstring"""
return len(self.dataset )
    def __getitem__( self , i ) -> Any:
        """simple docstring"""
        item = self.dataset[i]
        processed = self.process(item , **self.params )
        return processed
class PipelineIterator( IterableDataset ):
    def __init__( self , loader , infer , params , loader_batch_size=None ) -> Union[str, Any]:
        """simple docstring"""
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_data = None
        self._loader_batch_index = None
def __len__( self ) -> Any:
"""simple docstring"""
return len(self.loader )
def __iter__( self ) -> Optional[int]:
"""simple docstring"""
        self.iterator = iter(self.loader )
return self
    def loader_batch_item( self ) -> int:
        """simple docstring"""
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element , ModelOutput ):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element , tuple ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched )
self._loader_batch_index += 1
return result
    def __next__( self ) -> List[str]:
        """simple docstring"""
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator )
        processed = self.infer(item , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed , torch.Tensor ):
                first_tensor = processed
            else:
                key = list(processed.keys() )[0]
                first_tensor = processed[key]
            if isinstance(first_tensor , list ):
                observed_batch_size = len(first_tensor )
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class PipelineChunkIterator( PipelineIterator ):
    def __init__( self , loader , infer , params , loader_batch_size=None ) -> Tuple:
        """simple docstring"""
        super().__init__(loader , infer , params )
def __iter__( self ) -> Optional[Any]:
"""simple docstring"""
        self.iterator = iter(self.loader )
        self.subiterator = None
return self
    def __next__( self ) -> int:
        """simple docstring"""
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
return processed
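
# (PipelineChunkIterator in effect turns [[a, b], [c]] into the stream
# a, b, c: each __next__ pulls from the current sub-iterator and only advances
# the outer iterator once the sub-iterator is exhausted.)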
class PipelinePackIterator( PipelineIterator ):
def __iter__( self ) -> Optional[int]:
"""simple docstring"""
        self.iterator = iter(self.loader )
return self
    def __next__( self ) -> Optional[int]:
        """simple docstring"""
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop('is_last' )
                accumulator.append(item )
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop('is_last' )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop('is_last' )
                accumulator.append(item )
return accumulator
class KeyDataset( Dataset ):
    def __init__( self , dataset , key ) -> Optional[int]:
        """simple docstring"""
        self.dataset = dataset
        self.key = key
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.dataset )
    def __getitem__( self , i ) -> List[str]:
"""simple docstring"""
return self.dataset[i][self.key]
class KeyPairDataset( Dataset ):
    def __init__( self , dataset , key1 , key2 ) -> List[str]:
        """simple docstring"""
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.dataset )
    def __getitem__( self , i ) -> Dict:
"""simple docstring"""
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ) -> int:
            """simple docstring"""
            pass
@is_pipeline_test
@require_vision
class __a ( unittest.TestCase ):
@require_torch
    def test_small_model_pt( self ) -> str:
"""simple docstring"""
        image_classifier = pipeline(
            model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        output = image_classifier(image , candidate_labels=['a', 'b', 'c'] )
        # The floating-point scores are so close that rounding error makes the
        # ordering non-deterministic across Python and torch versions.
        self.assertIn(
            nested_simplify(output ) , [
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
] , )
        output = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
[
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
],
] , )
@require_tf
    def test_small_model_tf( self ) -> Union[str, Any]:
"""simple docstring"""
        image_classifier = pipeline(
            model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' )
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        output = image_classifier(image , candidate_labels=['a', 'b', 'c'] )
        self.assertEqual(
            nested_simplify(output ) , [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}] , )
        output = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
[
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
],
[
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
{'score': 0.333, 'label': ANY(_SCREAMING_SNAKE_CASE )},
],
] , )
@slow
@require_torch
    def test_large_model_pt( self ) -> Optional[Any]:
"""simple docstring"""
        image_classifier = pipeline(
            task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        output = image_classifier(image , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
            nested_simplify(output ) , [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] , )
        output = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
    def test_large_model_tf( self ) -> Optional[int]:
"""simple docstring"""
        image_classifier = pipeline(
            task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        output = image_classifier(image , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
            nested_simplify(output ) , [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] , )
        output = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
        self.assertEqual(
            nested_simplify(output ) , [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 , )
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ :int = logging.get_logger(__name__)
lowerCAmelCase__ :Optional[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig( PretrainedConfig ):
    model_type = 'data2vec-text'
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> List[Any]:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig( OnnxConfig ):
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__( self , parent , batch_size=2 , image_size=32 , patch_size=16 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=4 , backbone_out_indices=[0, 1, 2, 3] , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , backbone_featmap_shape=[1, 384, 24, 24] , is_hybrid=True , scope=None , ) -> Optional[int]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1
    def prepare_config_and_inputs( self ) -> int:
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ) -> Tuple:
"""simple docstring"""
        backbone_config = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [96, 192, 384, 768],
'num_groups': 2,
}
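        # This dict configures the convolutional (BiT-style) backbone used by
        # the hybrid DPT variant; it is passed to DPTConfig as `backbone_config`.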
return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )
    def create_and_check_model( self , config , pixel_values , labels ) -> str:
        """simple docstring"""
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_depth_estimation( self , config , pixel_values , labels ) -> Any:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ) -> Optional[int]:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ) -> Union[str, Any]:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ) -> List[str]:
        """simple docstring"""
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DPT does not use inputs_embeds' )
    def test_inputs_embeds( self ) -> Optional[int]:
"""simple docstring"""
pass
    def test_model_common_attributes( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
continue
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = False
_UpperCAmelCase = True
if model_class in get_values(_SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=_SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
_UpperCAmelCase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_UpperCAmelCase = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
pass
@slow
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_UpperCAmelCase = DPTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 'add'
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
@slow
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' )
_UpperCAmelCase = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = outputs.predicted_depth
# verify the predicted depth
_UpperCAmelCase = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 329 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __a :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = patch_norm
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = is_training
_UpperCAmelCase = scope
_UpperCAmelCase = use_labels
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = encoder_stride
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ['stem']
_UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_a : int = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
_a : str = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
_a : Optional[int] = False
_a : List[str] = False
_a : List[str] = False
_a : Optional[int] = False
_a : Tuple = False
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE )
@unittest.skip('Swin does not use inputs_embeds' )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('Swin does not support feedforward chunking' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()

            def recursive_check(tuple_object , dict_object ):
                if isinstance(tuple_object , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif isinstance(tuple_object , Dict ):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values() , dict_object.values() ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1e-5 ) , msg=(
                            'Tuple and dict output are not equal. Difference:'
                            f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
                            f''' {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object ).any()}. Dict has'''
                            f''' `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object ).any()}.'''
                        ) , )

            recursive_check(tuple_output , dict_output )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} )
@require_torch
class __a ( unittest.TestCase , UpperCAmelCase ):
_a : Any = (MaskFormerSwinBackbone,) if is_torch_available() else ()
_a : Any = MaskFormerSwinConfig
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModelTester(self )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
_UpperCAmelCase = backbone_class(_SCREAMING_SNAKE_CASE )
backbone.to(_SCREAMING_SNAKE_CASE )
backbone.eval()
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
| 329 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.upsample.0''': '''encoder.upsample.projection''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
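# MAPPING translates fairseq parameter-name fragments into their Hugging Face
# counterparts; the '*' wildcard is filled in with the encoder layer index below.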
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
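# recursively_load_weights walks the fairseq state dict and copies every tensor
# into the HF model: conv feature-extractor weights are routed through
# load_conv_layer, everything else through the MAPPING table, and anything
# unmatched is collected and logged as unused.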
def recursively_load_weights(fairseq_model , hf_model , is_finetuned ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "bias" in name:
                        weight_type = 'bias'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
logger.warning(F'''Unused weights: {unused_weights}''' )
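# Fairseq conv names look like "conv_layers.<layer_id>.<type_id>....": type_id 0
# is the convolution itself and type_id 2 a layer norm (or the group norm of
# layer 0 when group normalization is used).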
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
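# convert_config derives a transformers SEWConfig from the fairseq config
# namespace; for fine-tuned checkpoints, CTC/masking settings are read from
# the outer Wav2VecCtc-style model config.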
def lowerCAmelCase__ ( a__: Union[str, Any] , a__: int ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = SEWConfig()
if is_finetuned:
_UpperCAmelCase = model.wav_encoder.wav_model.cfg
else:
_UpperCAmelCase = model.cfg
_UpperCAmelCase = fs_config.conv_bias
_UpperCAmelCase = eval(fs_config.conv_feature_layers )
_UpperCAmelCase = [x[0] for x in conv_layers]
_UpperCAmelCase = [x[1] for x in conv_layers]
_UpperCAmelCase = [x[2] for x in conv_layers]
_UpperCAmelCase = 'gelu'
_UpperCAmelCase = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
_UpperCAmelCase = 0.0
_UpperCAmelCase = fs_config.activation_fn.name
_UpperCAmelCase = fs_config.encoder_embed_dim
_UpperCAmelCase = 0.02
_UpperCAmelCase = fs_config.encoder_ffn_embed_dim
_UpperCAmelCase = 1e-5
_UpperCAmelCase = fs_config.encoder_layerdrop
_UpperCAmelCase = fs_config.encoder_attention_heads
_UpperCAmelCase = fs_config.conv_pos_groups
_UpperCAmelCase = fs_config.conv_pos
_UpperCAmelCase = len(a__ )
_UpperCAmelCase = fs_config.encoder_layers
_UpperCAmelCase = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
_UpperCAmelCase = model.cfg
_UpperCAmelCase = fs_config.final_dropout
_UpperCAmelCase = fs_config.layerdrop
_UpperCAmelCase = fs_config.activation_dropout
_UpperCAmelCase = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
_UpperCAmelCase = fs_config.attention_dropout
_UpperCAmelCase = fs_config.dropout_input
_UpperCAmelCase = fs_config.dropout
_UpperCAmelCase = fs_config.mask_channel_length
_UpperCAmelCase = fs_config.mask_channel_prob
_UpperCAmelCase = fs_config.mask_length
_UpperCAmelCase = fs_config.mask_prob
_UpperCAmelCase = 'Wav2Vec2FeatureExtractor'
_UpperCAmelCase = 'Wav2Vec2CTCTokenizer'
return config
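# End-to-end conversion: load the fairseq checkpoint, build or load a config,
# optionally create a CTC tokenizer from the fairseq dictionary, copy the
# weights across, and save everything in transformers format.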
@torch.no_grad()
def lowerCAmelCase__ ( a__: Tuple , a__: Optional[Any] , a__: Any=None , a__: List[str]=None , a__: str=True ) -> Tuple:
'''simple docstring'''
if is_finetuned:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_UpperCAmelCase = SEWConfig.from_pretrained(a__ )
else:
_UpperCAmelCase = convert_config(model[0] , a__ )
_UpperCAmelCase = model[0].eval()
_UpperCAmelCase = True if config.feat_extract_norm == 'layer' else False
_UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=a__ , return_attention_mask=a__ , )
if is_finetuned:
if dict_path:
_UpperCAmelCase = Dictionary.load(a__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCAmelCase = target_dict.pad_index
_UpperCAmelCase = target_dict.bos_index
_UpperCAmelCase = target_dict.pad_index
_UpperCAmelCase = target_dict.bos_index
_UpperCAmelCase = target_dict.eos_index
_UpperCAmelCase = len(target_dict.symbols )
_UpperCAmelCase = os.path.join(a__ , 'vocab.json' )
if not os.path.isdir(a__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(a__ ) )
return
os.makedirs(a__ , exist_ok=a__ )
with open(a__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , a__ )
_UpperCAmelCase = WavaVecaCTCTokenizer(
a__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=a__ , )
_UpperCAmelCase = WavaVecaProcessor(feature_extractor=a__ , tokenizer=a__ )
processor.save_pretrained(a__ )
_UpperCAmelCase = SEWForCTC(a__ )
else:
_UpperCAmelCase = SEWModel(a__ )
feature_extractor.save_pretrained(a__ )
recursively_load_weights(a__ , a__ , a__ )
hf_model.save_pretrained(a__ )
if __name__ == "__main__":
lowerCAmelCase__ :Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCAmelCase__ :List[str] = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 329 |
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_0_0_0 ) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen ) ) ) < n:
        answer += 1
    return answer + 1
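# Example: solution(3) == 12, since F(12) = 144 is the first Fibonacci number
# with three digits.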
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 329 | 1 |
lowerCAmelCase__ :Optional[int] = '''Alexander Joslin'''
import operator as op
from .stack import Stack
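# Dijkstra's two-stack algorithm for fully parenthesised infix expressions:
#   RULE 1: push operands on the operand stack
#   RULE 2: push operators on the operator stack
#   RULE 3: ignore left parentheses
#   RULE 4: on a right parenthesis, pop one operator and two operands, apply
#           the operator, and push the result back on the operand stack
#   RULE 5: when the input is exhausted, the result is on top of the operand stack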
def dijkstras_two_stack_algorithm(equation: str ) -> int:
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2 , num1 )
            operand_stack.push(total )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 329 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __a ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> str:
"""simple docstring"""
_UpperCAmelCase = size if size is not None else {'shortest_edge': 30}
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 30, 'width': 30}
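        # crop_pct mirrors timm-style eval preprocessing: the image is resized to
        # roughly size / crop_pct before being center-cropped down to crop_size.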
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize_and_center_crop
_UpperCAmelCase = size
_UpperCAmelCase = crop_pct
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a ( UpperCAmelCase , unittest.TestCase ):
_a : Optional[Any] = PoolFormerImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = PoolFormerImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'crop_pct' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 329 | 1 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
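# Utilities for running a layer over large inputs in memory-bounded chunks,
# in the style of the OpenFold/AlphaFold2 codebase.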
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor] ) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree , dict ):
        for v in tree.values():
            shapes.extend(_fetch_dims(v ) )
    elif isinstance(tree , (list, tuple) ):
        for t in tree:
            shapes.extend(_fetch_dims(t ) )
    elif isinstance(tree , torch.Tensor ):
        shapes.append(tree.shape )
    else:
        raise ValueError('Not supported' )
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int , dims: Tuple[int, ...] ) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims ):
        idx.append(flat_idx % d )
        flat_idx = flat_idx // d
    return tuple(reversed(idx ) )
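# _get_minimal_slice_set computes the smallest ordered set of slices that
# exactly covers the flat index range [start, end] over a tensor's batch dims.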
@torch.jit.ignore
def _get_minimal_slice_set(start: Sequence[int] , end: Sequence[int] , dims: Sequence[int] , start_edges: Optional[Sequence[bool]] = None , end_edges: Optional[Sequence[bool]] = None , ) -> List[Tuple[slice, ...]]:
    def reduce_edge_list(l: List[bool] ) -> None:
        tally = True
        for i in range(len(l ) ):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]
    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges )
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end , dims )]
        reduce_edge_list(end_edges )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
    if len(start ) == 0:
        return [()]
    elif len(start ) == 1:
        return [(slice(start[0] , end[0] + 1 ),)]
    slices = []
    path_list = []
# Dimensions common to start and end can be selected directly
    for s, e in zip(start , end ):
        if s == e:
            path_list.append(slice(s , s + 1 ) )
        else:
            break
    path = tuple(path_list )
    divergence_idx = len(path )
# start == end, and we're done
    if divergence_idx == len(start ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
        middle_ground = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor , flat_start: int , flat_end: int , no_batch_dims: int ) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start , batch_dims ) )
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1 , batch_dims ) )
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx , end_idx , batch_dims , )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
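# chunk_layer runs `layer` over the inputs chunk_size batch elements at a time,
# writing each chunk into a pre-allocated output so peak memory stays bounded.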
def chunk_layer(layer: Callable , inputs: Dict[str, Any] , chunk_size: int , no_batch_dims: int , low_mem: bool = False , _out: Any = None , _add_into_out: bool = False , ) -> Any:
    if not (len(inputs ) > 0):
        raise ValueError('Must provide at least one input' )
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs )]
    orig_batch_dims = tuple([max(s ) for s in zip(*initial_dims )] )
    def _prep_inputs(t: torch.Tensor ) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
            t = t.reshape(-1 , *t.shape[no_batch_dims:] )
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
        return t
    prepped_inputs = tensor_tree_map(_prep_inputs , inputs )
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    num_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t: torch.Tensor ) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    i = 0
    out = prepped_outputs
    for _ in range(num_chunks ):
# Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice , flat_start=i , flat_end=min(flat_batch_dim , i + chunk_size ) , no_batch_dims=len(orig_batch_dims ) , )
        chunks = tensor_tree_map(select_chunk , prepped_inputs )
# Run the layer on the chunk
        output_chunk = layer(**chunks )
# Allocate space for the output
if out is None:
            out = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , output_chunk )
# Put the chunk in its pre-allocated space
        if isinstance(output_chunk , dict ):

            def assign(d1: dict , d2: dict ) -> None:
                for k, v in d1.items():
                    if isinstance(v , dict ):
                        assign(v , d2[k] )
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out , output_chunk )
        elif isinstance(output_chunk , tuple ):
            for x1, x2 in zip(out , output_chunk ):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk , torch.Tensor ):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
else:
raise ValueError('Not supported' )
i += chunk_size
    out = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , out )
return out
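# The tuner below (ChunkSizeTuner upstream) probes power-of-two chunk sizes
# with a binary search to find the largest one that doesn't run out of memory,
# caching the result for a given set of argument shapes.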
class __a :
    def __init__( self , max_chunk_size: int = 512 , ) -> None:
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None
    def _determine_favorable_chunk_size(self , fn , args , min_chunk_size ) -> int:
        logging.info('Tuning chunk size...' )
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size ) -> bool:
            try:
                with torch.no_grad():
                    fn(*args , chunk_size=chunk_size )
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates ) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i] )
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates ) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self , ac1 , ac2 ) -> bool:
        consistent = True
        for a1, a2 in zip(ac1 , ac2 ):
            assert type(a1 ) == type(a2 )
            if isinstance(a1 , (list, tuple) ):
                consistent &= self._compare_arg_caches(a1 , a2 )
            elif isinstance(a1 , dict ):
                a1_items = [v for _, v in sorted(a1.items() , key=lambda x : x[0] )]
                a2_items = [v for _, v in sorted(a2.items() , key=lambda x : x[0] )]
                consistent &= self._compare_arg_caches(a1_items , a2_items )
            else:
                consistent &= a1 == a2
        return consistent
    def tune_chunk_size(self , representative_fn , args , min_chunk_size , ) -> int:
        consistent = True
        arg_data = tree_map(lambda a : a.shape if isinstance(a , torch.Tensor ) else a , args , object )
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data ) == len(arg_data )
            consistent = self._compare_arg_caches(self.cached_arg_data , arg_data )
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn , args , min_chunk_size , )
            self.cached_arg_data = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 329 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __a ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20}
_UpperCAmelCase = do_thumbnail
_UpperCAmelCase = do_align_axis
_UpperCAmelCase = do_pad
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
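        # do_align_long_axis rotates an input by 90 degrees when its orientation
        # doesn't match that of `size`, as Donut expects for document images.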
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a ( UpperCAmelCase , unittest.TestCase ):
_a : List[str] = DonutImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = DonutImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_thumbnail' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_align_long_axis' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@is_flaky()
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 329 | 1 |
# flake8: noqa
# Lint as: python3
lowerCAmelCase__ :Optional[Any] = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 329 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ :Dict = logging.get_logger(__name__)
lowerCAmelCase__ :Optional[int] = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class __a ( UpperCAmelCase ):
_a : List[str] = 'openai-gpt'
_a : int = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _SCREAMING_SNAKE_CASE=40478 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE="cls_index" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.1 , **_SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_positions
_UpperCAmelCase = n_embd
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = afn
_UpperCAmelCase = resid_pdrop
_UpperCAmelCase = embd_pdrop
_UpperCAmelCase = attn_pdrop
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = summary_type
_UpperCAmelCase = summary_use_proj
_UpperCAmelCase = summary_activation
_UpperCAmelCase = summary_first_dropout
_UpperCAmelCase = summary_proj_to_labels
super().__init__(**_SCREAMING_SNAKE_CASE )
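# Hedged usage sketch (added; assuming this obfuscated class maps back to
# transformers' `OpenAIGPTConfig`), a smaller variant plus an attribute_map lookup:
#     config = OpenAIGPTConfig(n_layer=6, n_head=6, n_embd=384)
#     config.num_hidden_layers  # -> 6, resolved through the attribute_map above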
| 329 | 1 |
lowerCAmelCase__ :Optional[int] = {
'''meter''': '''m''',
'''kilometer''': '''km''',
'''megametre''': '''Mm''',
'''gigametre''': '''Gm''',
'''terametre''': '''Tm''',
'''petametre''': '''Pm''',
'''exametre''': '''Em''',
'''zettametre''': '''Zm''',
'''yottametre''': '''Ym''',
}
# Exponent of the factor (relative to the metre)
lowerCAmelCase__ :Dict = {
'''m''': 0,
'''km''': 3,
'''Mm''': 6,
'''Gm''': 9,
'''Tm''': 1_2,
'''Pm''': 1_5,
'''Em''': 1_8,
'''Zm''': 2_1,
'''Ym''': 2_4,
}
def lowerCAmelCase__ ( a__: float , a__: str , a__: str ) -> float:
'''simple docstring'''
_UpperCAmelCase = from_type.lower().strip('s' )
_UpperCAmelCase = to_type.lower().strip('s' )
_UpperCAmelCase = UNIT_SYMBOL.get(a__ , a__ )
_UpperCAmelCase = UNIT_SYMBOL.get(a__ , a__ )
if from_sanitized not in METRIC_CONVERSION:
_UpperCAmelCase = (
F'''Invalid \'from_type\' value: {from_type!r}.\n'''
F'''Conversion abbreviations are: {", ".join(a__ )}'''
)
raise ValueError(a__ )
if to_sanitized not in METRIC_CONVERSION:
_UpperCAmelCase = (
F'''Invalid \'to_type\' value: {to_type!r}.\n'''
F'''Conversion abbreviations are: {", ".join(a__ )}'''
)
raise ValueError(a__ )
_UpperCAmelCase = METRIC_CONVERSION[from_sanitized]
_UpperCAmelCase = METRIC_CONVERSION[to_sanitized]
_UpperCAmelCase = 1
if from_exponent > to_exponent:
_UpperCAmelCase = from_exponent - to_exponent
else:
_UpperCAmelCase = -(to_exponent - from_exponent)
return value * pow(1_0 , a__ )
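# Illustrative calls (added; `length_conversion(value, from_type, to_type)` is the
# assumed original name of the obfuscated function above):
#     length_conversion(4, "meter", "kilometer")   # -> 0.004   (exponent 0 -> 3)
#     length_conversion(1, "megametre", "km")      # -> 1000.0  (exponent 6 -> 3)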
if __name__ == "__main__":
from doctest import testmod
testmod()
| 329 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def lowerCAmelCase__ ( a__: Any , a__: Tuple , a__: Union[str, Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = hf_hub_url(repo_id=a__ , path=a__ , revision=a__ )
assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(a__ )}'''
| 329 | 1 |
import os
from distutils.util import strtobool
def lowerCAmelCase__ ( a__: Tuple , a__: Tuple ) -> Any:
'''simple docstring'''
for e in env_keys:
_UpperCAmelCase = int(os.environ.get(a__ , -1 ) )
if val >= 0:
return val
return default
def lowerCAmelCase__ ( a__: Any , a__: List[Any]=False ) -> Any:
'''simple docstring'''
_UpperCAmelCase = os.environ.get(a__ , str(a__ ) )
return strtobool(a__ ) == 1 # As its name indicates `strtobool` actually returns an int...
def lowerCAmelCase__ ( a__: int , a__: Optional[int]="no" ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = os.environ.get(a__ , str(a__ ) )
return value
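# Hedged usage sketch; the assumed original names are accelerate-style helpers
# `get_int_from_env`, `parse_flag_from_env` and `parse_choice_from_env`:
#     os.environ["MY_DEBUG"] = "1"
#     parse_flag_from_env("MY_DEBUG")              # -> True, via strtobool
#     parse_choice_from_env("MY_MIXED_PRECISION")  # -> "no" (the default)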
| 329 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCAmelCase__ :Optional[int] = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowerCAmelCase__ ( a__: Tuple , a__: Optional[int]=None ) -> Any:
'''simple docstring'''
require_version(deps[pkg] , a__ )
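# Hedged example (the assumed original name is `dep_version_check`): calling
# `dep_version_check("tokenizers")` re-runs the pinned-range check from `deps`
# when an optional integration is imported lazily.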
| 329 | 1 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ :int = logging.get_logger(__name__)
def lowerCAmelCase__ ( a__: Any ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
_UpperCAmelCase = 1_2_8
elif "12-12" in model_name:
_UpperCAmelCase = 1_2
_UpperCAmelCase = 1_2
elif "14-14" in model_name:
_UpperCAmelCase = 1_4
_UpperCAmelCase = 1_4
elif "16-16" in model_name:
_UpperCAmelCase = 1_6
_UpperCAmelCase = 1_6
else:
raise ValueError('Model not supported' )
_UpperCAmelCase = 'huggingface/label-files'
if "speech-commands" in model_name:
_UpperCAmelCase = 3_5
_UpperCAmelCase = 'speech-commands-v2-id2label.json'
else:
_UpperCAmelCase = 5_2_7
_UpperCAmelCase = 'audioset-id2label.json'
_UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) )
_UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase__ ( a__: int ) -> Optional[Any]:
'''simple docstring'''
if "module.v" in name:
_UpperCAmelCase = name.replace('module.v' , 'audio_spectrogram_transformer' )
if "cls_token" in name:
_UpperCAmelCase = name.replace('cls_token' , 'embeddings.cls_token' )
if "dist_token" in name:
_UpperCAmelCase = name.replace('dist_token' , 'embeddings.distillation_token' )
if "pos_embed" in name:
_UpperCAmelCase = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
_UpperCAmelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
# transformer blocks
if "blocks" in name:
_UpperCAmelCase = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
_UpperCAmelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
_UpperCAmelCase = name.replace('attn' , 'attention.self' )
if "norm1" in name:
_UpperCAmelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_UpperCAmelCase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_UpperCAmelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_UpperCAmelCase = name.replace('mlp.fc2' , 'output.dense' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
_UpperCAmelCase = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' )
# classifier head
if "module.mlp_head.0" in name:
_UpperCAmelCase = name.replace('module.mlp_head.0' , 'classifier.layernorm' )
if "module.mlp_head.1" in name:
_UpperCAmelCase = name.replace('module.mlp_head.1' , 'classifier.dense' )
return name
def lowerCAmelCase__ ( a__: str , a__: Optional[Any] ) -> str:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_UpperCAmelCase = orig_state_dict.pop(a__ )
if "qkv" in key:
_UpperCAmelCase = key.split('.' )
_UpperCAmelCase = int(key_split[3] )
_UpperCAmelCase = config.hidden_size
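            # the fused qkv projection stacks query, key and value along dim 0,
            # so each slice below spans `dim` rows (q: [:dim], k: [dim:2*dim], v: [-dim:])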
if "weight" in key:
_UpperCAmelCase = val[:dim, :]
_UpperCAmelCase = val[dim : dim * 2, :]
_UpperCAmelCase = val[-dim:, :]
else:
_UpperCAmelCase = val[:dim]
_UpperCAmelCase = val[dim : dim * 2]
_UpperCAmelCase = val[-dim:]
else:
_UpperCAmelCase = val
return orig_state_dict
def lowerCAmelCase__ ( a__: List[Any] ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = [
'module.v.head.weight',
'module.v.head.bias',
'module.v.head_dist.weight',
'module.v.head_dist.bias',
]
for k in ignore_keys:
state_dict.pop(a__ , a__ )
@torch.no_grad()
def lowerCAmelCase__ ( a__: Optional[Any] , a__: Optional[int] , a__: int=False ) -> int:
'''simple docstring'''
_UpperCAmelCase = get_audio_spectrogram_transformer_config(a__ )
_UpperCAmelCase = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
# load original state_dict
_UpperCAmelCase = model_name_to_url[model_name]
_UpperCAmelCase = torch.hub.load_state_dict_from_url(a__ , map_location='cpu' )
# remove some keys
remove_keys(a__ )
# rename some keys
_UpperCAmelCase = convert_state_dict(a__ , a__ )
# load 🤗 model
_UpperCAmelCase = ASTForAudioClassification(a__ )
model.eval()
model.load_state_dict(a__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
_UpperCAmelCase = -4.2_677_393 if 'speech-commands' not in model_name else -6.845_978
_UpperCAmelCase = 4.5_689_974 if 'speech-commands' not in model_name else 5.5_654_526
_UpperCAmelCase = 1_0_2_4 if 'speech-commands' not in model_name else 1_2_8
_UpperCAmelCase = ASTFeatureExtractor(mean=a__ , std=a__ , max_length=a__ )
if "speech-commands" in model_name:
_UpperCAmelCase = load_dataset('speech_commands' , 'v0.02' , split='validation' )
_UpperCAmelCase = dataset[0]['audio']['array']
else:
_UpperCAmelCase = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , )
_UpperCAmelCase , _UpperCAmelCase = torchaudio.load(a__ )
_UpperCAmelCase = waveform.squeeze().numpy()
_UpperCAmelCase = feature_extractor(a__ , sampling_rate=1_6_0_0_0 , return_tensors='pt' )
# forward pass
_UpperCAmelCase = model(**a__ )
_UpperCAmelCase = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
_UpperCAmelCase = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
_UpperCAmelCase = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
_UpperCAmelCase = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
_UpperCAmelCase = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
_UpperCAmelCase = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
_UpperCAmelCase = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
_UpperCAmelCase = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
_UpperCAmelCase = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError('Unknown model name' )
if not torch.allclose(logits[0, :3] , a__ , atol=1e-4 ):
raise ValueError('Logits don\'t match' )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(a__ ).mkdir(exist_ok=a__ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a__ )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(a__ )
if push_to_hub:
print('Pushing model and feature extractor to the hub...' )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
lowerCAmelCase__ :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase__ :Optional[Any] = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 329 |
from __future__ import annotations
def lowerCAmelCase__ ( a__: dict , a__: str ) -> set[str]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = set(a__ ), [start]
while stack:
_UpperCAmelCase = stack.pop()
explored.add(a__ )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(a__ )
return explored
lowerCAmelCase__ :Tuple = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
| 329 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class __a ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 255 , _SCREAMING_SNAKE_CASE=True , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_pad
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Any:
"""simple docstring"""
if not batched:
_UpperCAmelCase = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
_UpperCAmelCase , _UpperCAmelCase = image.size
else:
_UpperCAmelCase , _UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
_UpperCAmelCase = int(self.size['shortest_edge'] * h / w )
_UpperCAmelCase = self.size['shortest_edge']
elif w > h:
_UpperCAmelCase = self.size['shortest_edge']
_UpperCAmelCase = int(self.size['shortest_edge'] * w / h )
else:
_UpperCAmelCase = self.size['shortest_edge']
_UpperCAmelCase = self.size['shortest_edge']
else:
_UpperCAmelCase = []
for image in image_inputs:
_UpperCAmelCase , _UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
_UpperCAmelCase = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
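    # Worked example (added for clarity): with the integration defaults used
    # below (shortest_edge=800), a 480x640 COCO image scales to 800x1066,
    # since int(640 * 800 / 480) == 1066 -- matching the (1, 3, 800, 1066) asserts.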
@require_torch
@require_vision
class __a ( UpperCAmelCase , unittest.TestCase ):
_a : str = DeformableDetrImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = DeformableDetrImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
_UpperCAmelCase , _UpperCAmelCase = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {'image_id': 39769, 'annotations': target}
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor()
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
_UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
_UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
_UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic' )
_UpperCAmelCase = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
_UpperCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
_UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
_UpperCAmelCase = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
_UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
_UpperCAmelCase = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
_UpperCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
_UpperCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
| 329 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(_SCREAMING_SNAKE_CASE , os.listdir(_SCREAMING_SNAKE_CASE )[0] , 'snapshots' ) )]
_UpperCAmelCase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 4
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3
assert np.abs(np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1
_UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_SCREAMING_SNAKE_CASE ) == num_samples
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = scheduler.create_state()
_UpperCAmelCase = scheduler_state
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.random.PRNGKey(0 )
_UpperCAmelCase = 50
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = jax.random.split(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
_UpperCAmelCase = jax.device_count()
_UpperCAmelCase = num_samples * [prompt]
_UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
_UpperCAmelCase = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
_UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_SCREAMING_SNAKE_CASE , use_memory_efficient_attention=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase = replicate(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = shard(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = pipeline(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , jit=_SCREAMING_SNAKE_CASE ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        _UpperCAmelCase = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 329 | 1 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowerCAmelCase__ :str = False
lowerCAmelCase__ :Any = True
lowerCAmelCase__ :List[str] = False
if __name__ == "__main__":
lowerCAmelCase__ :List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCAmelCase__ :List[Any] = parser.parse_args()
lowerCAmelCase__ :Union[str, Any] = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
lowerCAmelCase__ :List[Any] = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
lowerCAmelCase__ :Union[str, Any] = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
lowerCAmelCase__ :List[str] = reader.read()
lowerCAmelCase__ :int = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
lowerCAmelCase__ :Union[str, Any] = UNetaDModel(**config)
else:
lowerCAmelCase__ :Union[str, Any] = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
lowerCAmelCase__ :Tuple = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCAmelCase__ :int = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCAmelCase__ :int = config[key]
del config[key]
lowerCAmelCase__ :Any = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
lowerCAmelCase__ :Dict = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
if do_only_weights:
lowerCAmelCase__ :List[Any] = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
lowerCAmelCase__ :Tuple = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
lowerCAmelCase__ :int = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('''.''')[0] == key:
lowerCAmelCase__ :Any = param_value
lowerCAmelCase__ :Union[str, Any] = True
if not has_changed:
lowerCAmelCase__ :Optional[int] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 329 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase__ :int = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase )
class __a ( UpperCAmelCase ):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = {}
if prompt is not None:
_UpperCAmelCase = prompt
if generate_kwargs is not None:
_UpperCAmelCase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_UpperCAmelCase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
_UpperCAmelCase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int:
"""simple docstring"""
_UpperCAmelCase = load_image(_SCREAMING_SNAKE_CASE )
if prompt is not None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(
f'''Received an invalid text input, got - {type(_SCREAMING_SNAKE_CASE )} - but expected a single string. '''
'Note also that one single text can be provided for conditional image to text generation.' )
_UpperCAmelCase = self.model.config.model_type
if model_type == "git":
_UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
_UpperCAmelCase = self.tokenizer(text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ).input_ids
_UpperCAmelCase = [self.tokenizer.cls_token_id] + input_ids
_UpperCAmelCase = torch.tensor(_SCREAMING_SNAKE_CASE ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
_UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , header_text=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
_UpperCAmelCase = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
model_inputs.update(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
_UpperCAmelCase = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_UpperCAmelCase = None
return model_inputs
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> List[str]:
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , _SCREAMING_SNAKE_CASE )
and all(x is None for x in model_inputs['input_ids'] )
):
_UpperCAmelCase = None
if generate_kwargs is None:
_UpperCAmelCase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_UpperCAmelCase = model_inputs.pop(self.model.main_input_name )
_UpperCAmelCase = self.model.generate(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
return model_outputs
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = []
for output_ids in model_outputs:
_UpperCAmelCase = {
'generated_text': self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , )
}
records.append(_SCREAMING_SNAKE_CASE )
return records
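# Hedged usage sketch: "image-to-text" is a real transformers pipeline task, so
# this class is normally reached through the factory, e.g.:
#     from transformers import pipeline
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     captioner("cats.png")  # -> [{"generated_text": "..."}]
# (the checkpoint id is one public example; any vision2seq checkpoint works)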
| 329 | 1 |
from __future__ import annotations
def lowerCAmelCase__ ( a__: int | str ) -> bool:
'''simple docstring'''
_UpperCAmelCase = str(a__ )
return n == n[::-1]
def lowerCAmelCase__ ( a__: int = 1_0_0_0_0_0_0 ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = 0
for i in range(1 , a__ ):
if is_palindrome(a__ ) and is_palindrome(bin(a__ ).split('b' )[1] ):
total += i
return total
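# e.g. 585 is counted: it reads the same reversed in base 10, and
# bin(585) == '0b1001001001', whose digit part is also a palindrome
# (this is Project Euler problem 36).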
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 329 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def lowerCAmelCase__ ( *a__: str , a__: Optional[Union[Dict, Any]] = None , a__: Dict=True , a__: Any=2 ) -> Union[str, Any]:
'''simple docstring'''
from .. import __version__
_UpperCAmelCase = take_from
_UpperCAmelCase = ()
if not isinstance(args[0] , a__ ):
_UpperCAmelCase = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(a__ ).base_version ) >= version.parse(a__ ):
raise ValueError(
F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
F''' version {__version__} is >= {version_name}''' )
_UpperCAmelCase = None
if isinstance(a__ , a__ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(a__ ),)
_UpperCAmelCase = F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(a__ , a__ ):
values += (getattr(a__ , a__ ),)
_UpperCAmelCase = F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
_UpperCAmelCase = F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
_UpperCAmelCase = warning + ' ' if standard_warn else ''
warnings.warn(warning + message , a__ , stacklevel=a__ )
if isinstance(a__ , a__ ) and len(a__ ) > 0:
_UpperCAmelCase = inspect.getouterframes(inspect.currentframe() )[1]
_UpperCAmelCase = call_frame.filename
_UpperCAmelCase = call_frame.lineno
_UpperCAmelCase = call_frame.function
_UpperCAmelCase , _UpperCAmelCase = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(a__ ) == 0:
return
elif len(a__ ) == 1:
return values[0]
return values
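# Hedged usage sketch (the assumed original name, as in diffusers, is `deprecate`):
#     kwargs = {"legacy_arg": 3}
#     value = deprecate("legacy_arg", "0.20.0", "use `new_arg` instead", take_from=kwargs)
# this pops the key from `kwargs`, emits a warning, and returns 3; it raises
# instead once the installed package version reaches 0.20.0.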
| 329 | 1 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowerCAmelCase__ :Optional[int] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
lowerCAmelCase__ :str = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
lowerCAmelCase__ :Optional[Any] = '''zero2'''
lowerCAmelCase__ :List[str] = '''zero3'''
lowerCAmelCase__ :List[Any] = [ZEROa, ZEROa]
def lowerCAmelCase__ ( a__: int , a__: str , a__: str ) -> Dict:
'''simple docstring'''
    _UpperCAmelCase = parameterized.to_safe_name('_'.join(str(x ) for x in param.args ) )
return F'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
lowerCAmelCase__ :Dict = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __a ( UpperCAmelCase ):
@parameterized.expand(_SCREAMING_SNAKE_CASE , name_func=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
self.run_and_check(
stage=_SCREAMING_SNAKE_CASE , model=_SCREAMING_SNAKE_CASE , distributed=_SCREAMING_SNAKE_CASE , fpaa=_SCREAMING_SNAKE_CASE , )
@require_torch_multi_gpu
@parameterized.expand(_SCREAMING_SNAKE_CASE , name_func=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
self.run_and_check(
stage=_SCREAMING_SNAKE_CASE , model=_SCREAMING_SNAKE_CASE , distributed=_SCREAMING_SNAKE_CASE , fpaa=_SCREAMING_SNAKE_CASE , )
@parameterized.expand(_SCREAMING_SNAKE_CASE , name_func=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
self.run_and_check(
stage=_SCREAMING_SNAKE_CASE , model=_SCREAMING_SNAKE_CASE , distributed=_SCREAMING_SNAKE_CASE , fpaa=_SCREAMING_SNAKE_CASE , )
@require_torch_multi_gpu
@parameterized.expand(_SCREAMING_SNAKE_CASE , name_func=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
self.run_and_check(
stage=_SCREAMING_SNAKE_CASE , model=_SCREAMING_SNAKE_CASE , distributed=_SCREAMING_SNAKE_CASE , fpaa=_SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 10 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , ) -> str:
"""simple docstring"""
_UpperCAmelCase = models[model]
_UpperCAmelCase = self.run_trainer(
stage=_SCREAMING_SNAKE_CASE , model_name=_SCREAMING_SNAKE_CASE , eval_steps=_SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=_SCREAMING_SNAKE_CASE , fpaa=_SCREAMING_SNAKE_CASE , )
self.do_checks(_SCREAMING_SNAKE_CASE )
return output_dir
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 10 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.get_auto_remove_tmp_dir('./xxx' , after=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = f'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(_SCREAMING_SNAKE_CASE )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_UpperCAmelCase = f'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_UpperCAmelCase = [f'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_UpperCAmelCase = self.get_launcher(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_SCREAMING_SNAKE_CASE , env=self.get_env() )
return output_dir
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = min(2 , get_gpu_count() ) if distributed else 1
return f'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
| 329 |
import math
lowerCAmelCase__ :Optional[int] = 1_0
lowerCAmelCase__ :Optional[Any] = 7
lowerCAmelCase__ :Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS
def lowerCAmelCase__ ( a__: int = 2_0 ) -> str:
'''simple docstring'''
_UpperCAmelCase = math.comb(a__ , a__ )
_UpperCAmelCase = math.comb(NUM_BALLS - BALLS_PER_COLOUR , a__ )
_UpperCAmelCase = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
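# By linearity of expectation, each of the 7 colours is missing only when all
# 20 picks avoid its 10 balls, which happens with probability
# C(60, 20) / C(70, 20); the expected number of distinct colours is therefore
# 7 * (1 - C(60, 20) / C(70, 20)), about 6.818741802.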
if __name__ == "__main__":
print(solution(2_0))
| 329 | 1 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowerCAmelCase__ ( a__: List[str] ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
state_dict.pop(a__ , a__ )
def lowerCAmelCase__ ( a__: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = emb.weight.shape
_UpperCAmelCase = nn.Linear(a__ , a__ , bias=a__ )
_UpperCAmelCase = emb.weight.data
return lin_layer
def lowerCAmelCase__ ( a__: List[Any] , a__: Dict="facebook/mbart-large-en-ro" , a__: Dict=False , a__: List[str]=False ) -> Any:
'''simple docstring'''
_UpperCAmelCase = torch.load(a__ , map_location='cpu' )['model']
remove_ignore_keys_(a__ )
_UpperCAmelCase = state_dict['encoder.embed_tokens.weight'].shape[0]
_UpperCAmelCase = MBartConfig.from_pretrained(a__ , vocab_size=a__ )
if mbart_aa and finetuned:
_UpperCAmelCase = 'relu'
_UpperCAmelCase = state_dict['decoder.embed_tokens.weight']
_UpperCAmelCase = MBartForConditionalGeneration(a__ )
model.model.load_state_dict(a__ )
if finetuned:
_UpperCAmelCase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase__ :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCAmelCase__ :Optional[Any] = parser.parse_args()
lowerCAmelCase__ :int = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 329 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_megatron_bert'] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
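# Design note: _LazyModule defers the torch-dependent imports declared in
# _import_structure until an attribute is first accessed, so importing this
# package stays cheap when only the config is needed.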
| 329 | 1 |
def is_arithmetic_series(series: list) -> bool:
    """Return True if the given list forms an arithmetic series."""
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the given list of numbers."""
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
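# Quick usage sketch:
#   >>> is_arithmetic_series([2, 4, 6])
#   True
#   >>> arithmetic_mean([2, 4, 6])
#   4.0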
| 329 |
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = 'wi_0' in t5x_model['target']['encoder']['layers_0']['mlp']

    if config.model_type == "t5":
        encoder_attn_name = 'SelfAttention'
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = 'LocalSelfAttention'
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = 'TransientGlobalSelfAttention'
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with"
            " `encoder_attention_type` attribute with a value from ['local', 'transient-global']."
        )

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f'layers_{str(layer_index)}'

        # Self-Attention
        t5x_attention_key = t5x_model['target']['encoder'][layer_name]['attention']['key']['kernel']
        t5x_attention_out = t5x_model['target']['encoder'][layer_name]['attention']['out']['kernel']
        t5x_attention_query = t5x_model['target']['encoder'][layer_name]['attention']['query']['kernel']
        t5x_attention_value = t5x_model['target']['encoder'][layer_name]['attention']['value']['kernel']

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
            t5x_mlp_wi_1 = t5x_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            t5x_mlp_wi = t5x_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']

        t5x_mlp_wo = t5x_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']

        # Assigning
        flax_model_encoder_layer_block = flax_model.params['encoder']['block'][str(layer_index)]['layer']
        flax_model_encoder_layer_block['0'][encoder_attn_name]['k']['kernel'] = t5x_attention_key
        flax_model_encoder_layer_block['0'][encoder_attn_name]['o']['kernel'] = t5x_attention_out
        flax_model_encoder_layer_block['0'][encoder_attn_name]['q']['kernel'] = t5x_attention_query
        flax_model_encoder_layer_block['0'][encoder_attn_name]['v']['kernel'] = t5x_attention_value

        flax_model_encoder_layer_block['0']['layer_norm']['weight'] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block['0'][encoder_attn_name]['global_input_layer_norm']['weight'] = (
                t5x_global_layer_norm
            )

        if split_mlp_wi:
            flax_model_encoder_layer_block['1']['DenseReluDense']['wi_0']['kernel'] = t5x_mlp_wi_0
            flax_model_encoder_layer_block['1']['DenseReluDense']['wi_1']['kernel'] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block['1']['DenseReluDense']['wi']['kernel'] = t5x_mlp_wi

        flax_model_encoder_layer_block['1']['DenseReluDense']['wo']['kernel'] = t5x_mlp_wo
        flax_model_encoder_layer_block['1']['layer_norm']['weight'] = t5x_mlp_layer_norm

        flax_model.params['encoder']['block'][str(layer_index)]['layer'] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model['target']['encoder']['relpos_bias']['rel_embedding'].T
    flax_model.params['encoder']['block']['0']['layer']['0'][encoder_attn_name]['relative_attention_bias'][
        'embedding'
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
        flax_model.params['encoder']['block']['0']['layer']['0'][encoder_attn_name][
            'global_relative_attention_bias'
        ]['embedding'] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model['target']['encoder']['encoder_norm']['scale']
    flax_model.params['encoder']['final_layer_norm']['weight'] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f'layers_{str(layer_index)}'

        # Self-Attention
        t5x_attention_key = t5x_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
        t5x_attention_out = t5x_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
        t5x_attention_query = t5x_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
        t5x_attention_value = t5x_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
            'scale'
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model['target']['decoder'][layer_name]['encoder_decoder_attention']
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module['key']['kernel']
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module['out']['kernel']
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module['query']['kernel']
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module['value']['kernel']

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
            t5x_mlp_wi_1 = t5x_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            t5x_mlp_wi = t5x_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']

        t5x_mlp_wo = t5x_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']

        # Assigning
        flax_model_decoder_layer_block = flax_model.params['decoder']['block'][str(layer_index)]['layer']
        flax_model_decoder_layer_block['0']['SelfAttention']['k']['kernel'] = t5x_attention_key
        flax_model_decoder_layer_block['0']['SelfAttention']['o']['kernel'] = t5x_attention_out
        flax_model_decoder_layer_block['0']['SelfAttention']['q']['kernel'] = t5x_attention_query
        flax_model_decoder_layer_block['0']['SelfAttention']['v']['kernel'] = t5x_attention_value

        flax_model_decoder_layer_block['0']['layer_norm']['weight'] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block['1']['EncDecAttention']['k']['kernel'] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block['1']['EncDecAttention']['o']['kernel'] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block['1']['EncDecAttention']['q']['kernel'] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block['1']['EncDecAttention']['v']['kernel'] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block['1']['layer_norm']['weight'] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi_0']['kernel'] = t5x_mlp_wi_0
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi_1']['kernel'] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi']['kernel'] = t5x_mlp_wi

        flax_model_decoder_layer_block['2']['DenseReluDense']['wo']['kernel'] = t5x_mlp_wo
        flax_model_decoder_layer_block['2']['layer_norm']['weight'] = t5x_mlp_layer_norm

        flax_model.params['decoder']['block'][str(layer_index)]['layer'] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model['target']['decoder']['decoder_norm']['scale']
    flax_model.params['decoder']['final_layer_norm']['weight'] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model['target']['decoder']['relpos_bias']['rel_embedding'].T
    flax_model.params['decoder']['block']['0']['layer']['0']['SelfAttention']['relative_attention_bias'][
        'embedding'
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model['target']['token_embedder']['embedding']
    flax_model.params['shared']['embedding'] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model['target']['decoder']:
        flax_model.params['lm_head']['kernel'] = t5x_model['target']['decoder']['logits_dense']['kernel']

    flax_model.save_pretrained(flax_dump_folder_path)
    print('T5X Model was successfully converted!')
if __name__ == "__main__":
lowerCAmelCase__ :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
lowerCAmelCase__ :List[str] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
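# Example invocation (paths are illustrative):
#   python convert_t5x_checkpoint_to_flax.py --t5x_checkpoint_path /tmp/t5x/checkpoint_500000 \
#       --config_name google/t5-v1_1-small --flax_dump_folder_path ./t5x-flax-dump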
| 329 | 1 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result) == sorted(expected)
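if __name__ == "__main__":
    # Allow a quick direct run without a test runner; pytest also collects this test.
    test_kruskal_successful_result()
    print('kruskal MST check passed')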
| 329 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}


class CTRLConfig(PretrainedConfig):
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
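# Usage sketch: attribute_map aliases the canonical config names onto CTRL's
# historical ones, e.g. CTRLConfig(n_layer=2).num_hidden_layers == 2.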
| 329 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING)

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params['prompt'] = prompt
        if generate_kwargs is not None:
            forward_kwargs['generate_kwargs'] = generate_kwargs
        if max_new_tokens is not None:
            if 'generate_kwargs' not in forward_kwargs:
                forward_kwargs['generate_kwargs'] = {}
            if 'max_new_tokens' in forward_kwargs['generate_kwargs']:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    ' please use only one')
            forward_kwargs['generate_kwargs']['max_new_tokens'] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f'Received an invalid text input, got - {type(prompt)} - but expected a single string. '
                    'Note also that one single text can be provided for conditional image to text generation.')

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({'input_ids': input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f'Model type {model_type} does not support conditional text generation')
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs['input_ids'] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # A GIT model with no prompt sets `input_ids` to None in `preprocess`; in batch
        # mode these get grouped into a list of `None`, which would break `generate`.
        if (
            'input_ids' in model_inputs
            and isinstance(model_inputs['input_ids'], list)
            and all(x is None for x in model_inputs['input_ids'])
        ):
            model_inputs['input_ids'] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                'generated_text': self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
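# Usage sketch (checkpoint name is illustrative; any image-to-text model works):
#   from transformers import pipeline
#   captioner = pipeline('image-to-text', model='nlpconnect/vit-gpt2-image-captioning')
#   captioner('https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png')
#   # -> [{'generated_text': '...caption...'}]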
| 329 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected height and width when feeding images to the
        # image processor, assuming do_resize is True with a scalar size.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'image_id': 39769, 'annotations': target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}

        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')

        # encode them
        image_processing = DeformableDetrImageProcessor(format='coco_panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
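# To run only these checks locally (path reflects the usual transformers repo layout):
#   python -m pytest tests/models/deformable_detr/test_image_processing_deformable_detr.py -q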
| 329 | 1 |
def aliquot_sum(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
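# Quick usage sketch: aliquot_sum(6) -> 6 (1 + 2 + 3), so 6 is a perfect number;
# aliquot_sum(12) -> 16 (1 + 2 + 3 + 4 + 6).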
| 329 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics')
        tokens = tokenizer(**self.metas)['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 329 | 1 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['input_values', 'attention_mask']

    def __init__(
        self,
        feature_size=1,
        sampling_rate=16000,
        padding_value=0.0,
        do_normalize=False,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        frame_signal_scale=1.0,
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        reduction_factor=2,
        return_attention_mask=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm='slaney',
            mel_scale='slaney',
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers',
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers',
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """Normalize each array in the list to zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform):
        """Extract log-mel filter-bank features from a single waveform."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel='log10',
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio=None,
        audio_target=None,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        if audio is None and audio_target is None:
            raise ValueError('You must provide either `audio` or `audio_target` values.')

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
                    f' {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs['labels'] = inputs_target['input_values']
                decoder_attention_mask = inputs_target.get('attention_mask')
                if decoder_attention_mask is not None:
                    inputs['decoder_attention_mask'] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech,
        is_target=False,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        **kwargs,
    ):
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({'input_values': features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({'input_values': speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs['input_values']
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs['input_values'] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs['input_values'] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs['input_values'] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_values'] = self.zero_mean_unit_var_norm(
                padded_inputs['input_values'], attention_mask=attention_mask, padding_value=self.padding_value)

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self):
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
        for name in names:
            if name in output:
                del output[name]

        return output
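# Usage sketch (silent one-second clip, illustrative only):
#   import numpy as np
#   fe = SpeechT5FeatureExtractor()
#   enc = fe(audio=np.zeros(16000), sampling_rate=16000)          # raw waveform inputs
#   tgt = fe(audio_target=np.zeros(16000), sampling_rate=16000)   # log-mel targets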
| 329 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.')
    parser.add_argument(
        '--dataset_name',
        type=str,
        default='wikitext',
        help='Name of the training. Explore datasets at: hf.co/datasets.',
    )
    parser.add_argument(
        '--dataset_config', type=str, default='wikitext-103-raw-v1', help='Configuration name of the dataset.')
    parser.add_argument(
        '--tokenizer_name_or_path',
        type=str,
        default='sayakpaul/unigram-tokenizer-wikitext',
        help='Tokenizer identifier. Can be a local filepath or a Hub identifier.',
    )
    parser.add_argument(
        '--shard_size',
        type=int,
        default=1000,
        help='Number of entries to go in a single shard.',
    )
    parser.add_argument('--split', type=str, default='train', choices=['train', 'test', 'validation'])
    parser.add_argument(
        '--limit',
        default=None,
        type=int,
        help='Limit the number of shards (used for debugging).',
    )
    parser.add_argument(
        '--max_length',
        type=int,
        default=512,
        help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
        ' sequence length that is a multiple of 8.',
    )
    parser.add_argument(
        '--output_dir',
        default='tf-tpu',
        type=str,
        help='Output directory where the TFRecord shards will be saved. If the'
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        ' shards will be directly saved to a Google Cloud Storage bucket.',
    )

    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples['text'])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data['input_ids'])):
        features = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i])),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i])),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f'Limiting the dataset to {args.limit} entries.')

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=['text'])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['input_ids'])
        filename = os.path.join(split_dir, f'dataset-{shard_count}-{records_containing}.tfrecord')
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print('Wrote file {} containing {} records'.format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f'split-{args.split}-records-count.txt', 'w') as f:
        print(f'Total {args.split} records: {total_records}', file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
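# Example invocation (script name and bucket are illustrative):
#   python prepare_tfrecord_shards.py --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#       --split train --shard_size 1000 --max_length 512 --output_dir gs://my-tfrecord-bucket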
| 329 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        # Return the item located at `_loader_batch_index` within the current batch.
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __iter__( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
_UpperCAmelCase = None
return self
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
if self.subiterator is None:
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_UpperCAmelCase = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated through.
            #
            # Another way to look at it is that we're basically flattening lists
            # of lists into a single list, but with generators
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
_UpperCAmelCase = next(self.subiterator )
return processed
class __a ( UpperCAmelCase ):
def __iter__( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
return self
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = False
_UpperCAmelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_UpperCAmelCase = self.loader_batch_item()
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
if is_last:
return accumulator
while not is_last:
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
_UpperCAmelCase = processed
else:
_UpperCAmelCase = list(processed.keys() )[0]
_UpperCAmelCase = processed[key]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCAmelCase = observed_batch_size
_UpperCAmelCase = processed
_UpperCAmelCase = 0
while self._loader_batch_index < self.loader_batch_size:
_UpperCAmelCase = self.loader_batch_item()
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
if is_last:
return accumulator
else:
_UpperCAmelCase = processed
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
return accumulator
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = dataset
_UpperCAmelCase = key
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
return self.dataset[i][self.key]
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = dataset
_UpperCAmelCase = keya
_UpperCAmelCase = keya
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 329 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any]=1_0 ) -> Any:
'''simple docstring'''
_UpperCAmelCase = []
for _ in range(a__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def lowerCAmelCase__ ( a__: List[str] , a__: Any=1_0 ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = []
for step in range(a__ ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(a__ , 'schedule.bin' )
torch.save(scheduler.state_dict() , a__ )
_UpperCAmelCase = torch.load(a__ )
scheduler.load_state_dict(a__ )
return lrs
@require_torch
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
_UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
_UpperCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_UpperCAmelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_SCREAMING_SNAKE_CASE , weight_decay=0.0 , relative_step=_SCREAMING_SNAKE_CASE , scale_parameter=_SCREAMING_SNAKE_CASE , warmup_init=_SCREAMING_SNAKE_CASE , )
for _ in range(1000 ):
_UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class __a ( unittest.TestCase ):
_a : Dict = nn.Linear(50 , 50 ) if is_torch_available() else None
_a : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
_a : List[Any] = 10
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> str:
"""simple docstring"""
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE , msg=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
_UpperCAmelCase = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_UpperCAmelCase , _UpperCAmelCase = data
_UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_UpperCAmelCase = unwrap_schedule(_SCREAMING_SNAKE_CASE , self.num_steps )
self.assertListAlmostEqual(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
_UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(_SCREAMING_SNAKE_CASE ) # wrap to test picklability of the schedule
_UpperCAmelCase = unwrap_and_save_reload_schedule(_SCREAMING_SNAKE_CASE , self.num_steps )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , msg=f'''failed for {scheduler_func} in save and reload''' )
class __a :
def __init__( self , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_UpperCAmelCase = fn
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return self.fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@classmethod
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
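# Editor's note: the expected values above imply linear warmup followed by
# linear decay; this standalone sketch reproduces one row without transformers
# (assumed formula, implied by the data: peak lr 10.0, 2 warmup steps, 10
# training steps).
def linear_warmup_decay(step, warmup=2, total=10, peak=10.0):
    if step < warmup:
        return peak * step / warmup
    return peak * max(0.0, (total - step) / (total - warmup))

lrs = [round(linear_warmup_decay(s), 2) for s in range(10)]
assert lrs == [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]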
| 329 | 1 |
from functools import reduce
lowerCAmelCase__ :List[str] = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def lowerCAmelCase__ ( a__: str = N ) -> int:
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda a__ , a__ : str(int(a__ ) * int(a__ ) ) , n[i : i + 1_3] ) )
for i in range(len(a__ ) - 1_2 ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
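# Editor's note: the reduce-based one-liner above was mangled by the corpus'
# renaming (the lambda reuses a single parameter name), so here is a plain
# loop-based sketch of the intended 13-digit sliding-window product.
def largest_window_product(digits: str, width: int = 13) -> int:
    best = 0
    for i in range(len(digits) - width + 1):
        product = 1
        for ch in digits[i : i + width]:
            product *= int(ch)
        best = max(best, product)
    return best

assert largest_window_product("12345", 2) == 20  # best adjacent pair: 4 * 5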
| 329 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ :Any = logging.get_logger(__name__)
def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any] , a__: Dict , a__: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = original_name.split('.' )[0]
_UpperCAmelCase = key.split('.' )
_UpperCAmelCase = int(key_list[key_list.index(a__ ) - 2] )
_UpperCAmelCase = int(key_list[key_list.index(a__ ) - 1] )
_UpperCAmelCase = orig_block_num - offset
_UpperCAmelCase = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' )
return key
def lowerCAmelCase__ ( a__: Tuple ) -> int:
'''simple docstring'''
_UpperCAmelCase = OrderedDict()
_UpperCAmelCase , _UpperCAmelCase = 0, 0
for key, value in state_dict.items():
if key.startswith('network' ):
_UpperCAmelCase = key.replace('network' , 'poolformer.encoder' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('bias' ) and "patch_embed" not in key:
patch_emb_offset += 1
_UpperCAmelCase = key[: key.find('proj' )]
_UpperCAmelCase = key.replace(a__ , F'''patch_embeddings.{total_embed_found}.''' )
_UpperCAmelCase = key.replace('proj' , 'projection' )
if key.endswith('bias' ):
total_embed_found += 1
if "patch_embeddings" in key:
_UpperCAmelCase = 'poolformer.encoder.' + key
if "mlp.fc1" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc1' , 'output.conv1' )
if "mlp.fc2" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'mlp.fc2' , 'output.conv2' )
if "norm1" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm1' , 'before_norm' )
if "norm2" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'norm2' , 'after_norm' )
if "layer_scale_1" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_1' , 'layer_scale_1' )
if "layer_scale_2" in key:
_UpperCAmelCase = replace_key_with_offset(a__ , a__ , 'layer_scale_2' , 'layer_scale_2' )
if "head" in key:
_UpperCAmelCase = key.replace('head' , 'classifier' )
_UpperCAmelCase = value
return new_state_dict
def lowerCAmelCase__ ( ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCAmelCase = Image.open(requests.get(a__ , stream=a__ ).raw )
return image
@torch.no_grad()
def lowerCAmelCase__ ( a__: Optional[int] , a__: Dict , a__: Any ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = PoolFormerConfig()
# set attributes based on model_name
_UpperCAmelCase = 'huggingface/label-files'
_UpperCAmelCase = model_name[-3:]
_UpperCAmelCase = 1_0_0_0
_UpperCAmelCase = 'imagenet-1k-id2label.json'
_UpperCAmelCase = (1, 1_0_0_0)
# set config attributes
_UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) )
_UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
if size == "s12":
_UpperCAmelCase = [2, 2, 6, 2]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 0.9
elif size == "s24":
_UpperCAmelCase = [4, 4, 1_2, 4]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 0.9
elif size == "s36":
_UpperCAmelCase = [6, 6, 1_8, 6]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.9
elif size == "m36":
_UpperCAmelCase = [6, 6, 1_8, 6]
_UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.95
elif size == "m48":
_UpperCAmelCase = [8, 8, 2_4, 8]
_UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.95
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor
_UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ )
# Prepare image
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a__ , return_tensors='pt' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
_UpperCAmelCase = torch.load(a__ , map_location=torch.device('cpu' ) )
# rename keys
_UpperCAmelCase = rename_keys(a__ )
# create HuggingFace model and load state dict
_UpperCAmelCase = PoolFormerForImageClassification(a__ )
model.load_state_dict(a__ )
model.eval()
# Define image processor
_UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ )
_UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
# forward pass
_UpperCAmelCase = model(a__ )
_UpperCAmelCase = outputs.logits
# define expected logit slices for different models
if size == "s12":
_UpperCAmelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
_UpperCAmelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
_UpperCAmelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
_UpperCAmelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
_UpperCAmelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(F'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , a__ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(a__ ).mkdir(exist_ok=a__ )
model.save_pretrained(a__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
lowerCAmelCase__ :str = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ :Dict = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
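# Editor's note: a de-obfuscated sketch of the block-offset remapping performed
# by the first helper above (key strings are illustrative). PoolFormer
# checkpoints number layers with a global counter, while the HF model restarts
# numbering per block, so a running offset is subtracted from the block index.
def remap_key(key: str, offset: int, old: str, new: str) -> str:
    parts = key.split(".")
    anchor = parts.index(old.split(".")[0])
    orig_block, layer = int(parts[anchor - 2]), int(parts[anchor - 1])
    return key.replace(
        f"{orig_block}.{layer}.{old}", f"block.{orig_block - offset}.{layer}.{new}"
    )

assert (
    remap_key("network.2.1.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
    == "network.block.1.1.output.conv1.weight"
)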
| 329 | 1 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def lowerCAmelCase__ ( a__: int = 3 ) -> qiskit.result.counts.Counts:
'''simple docstring'''
if isinstance(a__ , a__ ):
        raise TypeError('number of qubits must be an integer.' )
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.' )
if math.floor(a__ ) != number_of_qubits:
        raise ValueError('number of qubits must be an exact integer.' )
if number_of_qubits > 1_0:
        raise ValueError('number of qubits too large to simulate (>10).' )
_UpperCAmelCase = QuantumRegister(a__ , 'qr' )
_UpperCAmelCase = ClassicalRegister(a__ , 'cr' )
_UpperCAmelCase = QuantumCircuit(a__ , a__ )
_UpperCAmelCase = number_of_qubits
for i in range(a__ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(a__ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , a__ , a__ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(a__ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(a__ , a__ )
# simulate with 10000 shots
_UpperCAmelCase = Aer.get_backend('qasm_simulator' )
_UpperCAmelCase = execute(a__ , a__ , shots=1_0_0_0_0 )
return job.result().get_counts(a__ )
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
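# Editor's note: a numpy cross-check of what the circuit above implements. The
# QFT on n qubits is the unitary DFT matrix F[j, k] = exp(2*pi*i*j*k / 2**n) /
# sqrt(2**n); applied to |00...0> it yields a uniform superposition, which is
# why the measured counts come out roughly flat across all 2**n outcomes.
import numpy as np

n = 3
dim = 2 ** n
F = np.exp(2j * np.pi * np.outer(np.arange(dim), np.arange(dim)) / dim) / np.sqrt(dim)
assert np.allclose(F @ F.conj().T, np.eye(dim))            # F is unitary
probs = np.abs(F @ np.eye(dim)[0]) ** 2                    # QFT applied to |000>
assert np.allclose(probs, np.full(dim, 1 / dim))           # uniform distribution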
| 329 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_UpperCAmelCase = dataset
_UpperCAmelCase = process
_UpperCAmelCase = params
def __len__( self ) -> Union[str, Any]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.dataset[i]
_UpperCAmelCase = self.process(_SCREAMING_SNAKE_CASE , **self.params )
return processed
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = loader
_UpperCAmelCase = infer
_UpperCAmelCase = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_UpperCAmelCase = None
_UpperCAmelCase = loader_batch_size
# Internal bookkeeping
_UpperCAmelCase = None
_UpperCAmelCase = None
def __len__( self ) -> Any:
"""simple docstring"""
return len(self.loader )
def __iter__( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
return self
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_UpperCAmelCase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_UpperCAmelCase = {}
for k, element in self._loader_batch_data.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# Convert ModelOutput to tuple first
_UpperCAmelCase = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                    # Those are stored as lists of tensors, so they need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_UpperCAmelCase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_UpperCAmelCase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_UpperCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_UpperCAmelCase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_UpperCAmelCase = self._loader_batch_data.__class__(_SCREAMING_SNAKE_CASE )
self._loader_batch_index += 1
return result
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_UpperCAmelCase = next(self.iterator )
_UpperCAmelCase = self.infer(_SCREAMING_SNAKE_CASE , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
_UpperCAmelCase = processed
else:
_UpperCAmelCase = list(processed.keys() )[0]
_UpperCAmelCase = processed[key]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCAmelCase = observed_batch_size
# Setting internal index to unwrap the batch
_UpperCAmelCase = processed
_UpperCAmelCase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __iter__( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
_UpperCAmelCase = None
return self
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
if self.subiterator is None:
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_UpperCAmelCase = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated through.
            #
            # Another way to look at it is that we're basically flattening lists
            # of lists into a single list, but with generators
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
_UpperCAmelCase = next(self.subiterator )
return processed
class __a ( UpperCAmelCase ):
def __iter__( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
return self
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = False
_UpperCAmelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_UpperCAmelCase = self.loader_batch_item()
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
if is_last:
return accumulator
while not is_last:
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
_UpperCAmelCase = processed
else:
_UpperCAmelCase = list(processed.keys() )[0]
_UpperCAmelCase = processed[key]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCAmelCase = observed_batch_size
_UpperCAmelCase = processed
_UpperCAmelCase = 0
while self._loader_batch_index < self.loader_batch_size:
_UpperCAmelCase = self.loader_batch_item()
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
if is_last:
return accumulator
else:
_UpperCAmelCase = processed
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
return accumulator
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = dataset
_UpperCAmelCase = key
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
return self.dataset[i][self.key]
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = dataset
_UpperCAmelCase = keya
_UpperCAmelCase = keya
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 329 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowerCAmelCase__ :Any = logging.get_logger(__name__)
@dataclass
class __a :
def __init__( self , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=6.0 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="fp4" , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
_UpperCAmelCase = load_in_abit
_UpperCAmelCase = load_in_abit
_UpperCAmelCase = llm_inta_threshold
_UpperCAmelCase = llm_inta_skip_modules
_UpperCAmelCase = llm_inta_enable_fpaa_cpu_offload
_UpperCAmelCase = llm_inta_has_fpaa_weight
_UpperCAmelCase = bnb_abit_quant_type
_UpperCAmelCase = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
_UpperCAmelCase = torch.floataa
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , torch.dtype ):
_UpperCAmelCase = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(self.llm_inta_threshold , _SCREAMING_SNAKE_CASE ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , _SCREAMING_SNAKE_CASE ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , _SCREAMING_SNAKE_CASE ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , _SCREAMING_SNAKE_CASE ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , _SCREAMING_SNAKE_CASE ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , _SCREAMING_SNAKE_CASE ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
return self.load_in_abit or self.load_in_abit
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def UpperCAmelCase__ ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = cls(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = []
for key, value in kwargs.items():
if hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
to_remove.append(_SCREAMING_SNAKE_CASE )
for key in to_remove:
kwargs.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if return_unused_kwargs:
return config, kwargs
else:
return config
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as writer:
_UpperCAmelCase = self.to_dict()
_UpperCAmelCase = json.dumps(_SCREAMING_SNAKE_CASE , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE ) + '\n'
writer.write(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Dict[str, Any]:
"""simple docstring"""
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self ) -> Optional[int]:
"""simple docstring"""
return f'''{self.__class__.__name__} {self.to_json_string()}'''
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE = True ) -> str:
"""simple docstring"""
if use_diff is True:
_UpperCAmelCase = self.to_diff_dict()
else:
_UpperCAmelCase = self.to_dict()
return json.dumps(_SCREAMING_SNAKE_CASE , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE ) + "\n"
def UpperCAmelCase__ ( self ) -> Dict[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.to_dict()
# get the default config dict
_UpperCAmelCase = BitsAndBytesConfig().to_dict()
_UpperCAmelCase = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
_UpperCAmelCase = value
return serializable_config_dict
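# Editor's note: a hedged usage sketch, assuming the public transformers API
# (the readable argument names below follow the documented BitsAndBytesConfig,
# not the renamed ones in this sample); left commented since it requires
# transformers and a model checkpoint to run.
# import torch
# from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_quant_type="nf4",
#     bnb_4bit_use_double_quant=True,
#     bnb_4bit_compute_dtype=torch.bfloat16,
# )
# model = AutoModelForCausalLM.from_pretrained("some/model-id", quantization_config=bnb_config)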
| 329 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ :int = logging.get_logger(__name__)
lowerCAmelCase__ :Optional[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class __a ( UpperCAmelCase ):
_a : str = 'data2vec-text'
def __init__( self , _SCREAMING_SNAKE_CASE=30522 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-1_2 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
_UpperCAmelCase = classifier_dropout
class __a ( UpperCAmelCase ):
@property
def UpperCAmelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
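# Editor's note: the ONNX config above only varies the dynamic axes by task; a
# self-contained restatement of that branch for illustration.
from collections import OrderedDict as _OrderedDict

task = "multiple-choice"
dynamic_axis = (
    {0: "batch", 1: "choice", 2: "sequence"}
    if task == "multiple-choice"
    else {0: "batch", 1: "sequence"}
)
onnx_inputs = _OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])
assert list(onnx_inputs) == ["input_ids", "attention_mask"]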
| 329 | 1 |
lowerCAmelCase__ :str = 0 # The first color of the flag.
lowerCAmelCase__ :Dict = 1 # The second color of the flag.
lowerCAmelCase__ :str = 2 # The third color of the flag.
lowerCAmelCase__ :Optional[Any] = (red, white, blue)
def lowerCAmelCase__ ( a__: list ) -> list:
'''simple docstring'''
if not sequence:
return []
if len(a__ ) == 1:
return list(a__ )
_UpperCAmelCase = 0
_UpperCAmelCase = len(a__ ) - 1
_UpperCAmelCase = 0
while mid <= high:
if sequence[mid] == colors[0]:
_UpperCAmelCase , _UpperCAmelCase = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
_UpperCAmelCase , _UpperCAmelCase = sequence[high], sequence[mid]
high -= 1
else:
_UpperCAmelCase = F'''The elements inside the sequence must contains only {colors} values'''
raise ValueError(a__ )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ :str = input('''Enter numbers separated by commas:\n''').strip()
lowerCAmelCase__ :Optional[Any] = [int(item.strip()) for item in user_input.split(''',''')]
print(f'''{dutch_national_flag_sort(unsorted)}''')
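# Editor's note: a standalone restatement of the three-pointer pass above,
# assuming only the flag colors 0, 1, 2 appear (the def was renamed by the
# corpus' obfuscation, so the __main__ block's call would not resolve as
# printed). Invariant: everything left of `low` is 0, everything right of
# `high` is 2.
def dnf_sort(seq: list) -> list:
    low, mid, high = 0, 0, len(seq) - 1
    while mid <= high:
        if seq[mid] == 0:
            seq[low], seq[mid] = seq[mid], seq[low]
            low += 1
            mid += 1
        elif seq[mid] == 1:
            mid += 1
        else:  # seq[mid] == 2
            seq[mid], seq[high] = seq[high], seq[mid]
            high -= 1
    return seq

assert dnf_sort([2, 0, 1, 2, 0, 1]) == [0, 0, 1, 1, 2, 2]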
| 329 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __a :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = patch_norm
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = is_training
_UpperCAmelCase = scope
_UpperCAmelCase = use_labels
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = encoder_stride
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = ['stem']
_UpperCAmelCase = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __a ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_a : int = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
_a : str = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
_a : Optional[int] = False
_a : List[str] = False
_a : List[str] = False
_a : Optional[int] = False
_a : Tuple = False
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE )
@unittest.skip('Swin does not use inputs_embeds' )
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip('Swin does not support feedforward chunking' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase__ ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = 0
return t
def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ):
with torch.no_grad():
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple()
def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1e-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
f''' {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. Dict has'''
f''' `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}.'''
) , )
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} )
@require_torch
class __a ( unittest.TestCase , UpperCAmelCase ):
_a : Any = (MaskFormerSwinBackbone,) if is_torch_available() else ()
_a : Any = MaskFormerSwinConfig
def UpperCAmelCase__ ( self ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModelTester(self )
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
_UpperCAmelCase = backbone_class(_SCREAMING_SNAKE_CASE )
backbone.to(_SCREAMING_SNAKE_CASE )
backbone.eval()
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
_UpperCAmelCase = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
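# Editor's note: a worked check of the shape arithmetic in
# create_and_check_model above, with this tester's defaults (image_size=32,
# patch_size=2, depths=[1, 2, 1], embed_dim=16). Each Swin downsampling stage
# merges 2x2 patches, shrinking the sequence by 4 and doubling the channels.
image_size, patch_size, depths, embed_dim = 32, 2, [1, 2, 1], 16
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
expected_dim = embed_dim * 2 ** (len(depths) - 1)
assert (expected_seq_len, expected_dim) == (16, 64)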
| 329 | 1 |
class __a :
def __init__( self , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [0] * len_array
if len_array > 0:
_UpperCAmelCase = array[0]
for i in range(1 , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = self.prefix_sum[i - 1] + array[i]
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
_UpperCAmelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(_SCREAMING_SNAKE_CASE )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
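# Editor's note: the two methods above share one obfuscated name, so the O(1)
# range query is shadowed by the subarray-sum check; this standalone sketch
# shows both operations the class is meant to provide.
def prefix_sums(arr):
    out, total = [], 0
    for x in arr:
        total += x
        out.append(total)
    return out

p = prefix_sums([1, 2, 3, 4])                      # [1, 3, 6, 10]
assert p[2] - p[0] == 5                            # sum of arr[1..2] = 2 + 3

def has_subarray_sum(prefix, target):
    seen = {0}                                     # the empty prefix
    for c in prefix:
        if c - target in seen:
            return True
        seen.add(c)
    return False

assert has_subarray_sum(p, 6) and not has_subarray_sum(p, 11)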
| 329 |
from collections.abc import Generator
def lowerCAmelCase__ ( ) -> Generator[int, None, None]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = 0, 1
while True:
_UpperCAmelCase , _UpperCAmelCase = b, a + b
yield b
def lowerCAmelCase__ ( a__: int = 1_0_0_0 ) -> int:
'''simple docstring'''
_UpperCAmelCase = 1
_UpperCAmelCase = fibonacci_generator()
while len(str(next(a__ ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
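# Editor's note: a worked check of the index convention above. In standard
# 1-indexed Fibonacci (F1 = F2 = 1), F12 = 144 is the first 3-digit term, so
# solution(3) should return 12; the generator yields only one leading 1 and
# the trailing "+ 1" compensates for it.
a, b, idx = 1, 1, 2                                # F1 = F2 = 1
while len(str(b)) < 3:
    a, b = b, a + b
    idx += 1
assert (idx, b) == (12, 144)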
| 329 | 1 |