import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperature: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Implementation of the simulated annealing optimization algorithm."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperature
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # until we find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > -5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > -5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
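# A quick numeric check of the Metropolis acceptance rule used above (this demo is not
# part of the original file): a worsening move of size `change < 0` is accepted with
# probability e**(change / temp), so lower temperatures accept fewer bad moves.
if __name__ == "__main__":
    for temp in (100, 10, 1):
        print(f"temp={temp}: accept P = {math.e ** (-4 / temp):.4f}")
    # temp=100 -> ~0.9608, temp=10 -> ~0.6703, temp=1 -> ~0.0183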
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    """Feature extraction pipeline: returns the hidden states of the base transformer model."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor: logits or last_hidden_state
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
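# Usage sketch (not part of this file): inside transformers this class backs the
# "feature-extraction" pipeline task, so it is normally reached through the `pipeline`
# factory rather than instantiated directly. The checkpoint below is illustrative;
# running this downloads the model.
if __name__ == "__main__":
    from transformers import pipeline

    extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
    features = extractor("This is a test")
    # features is a nested list shaped [batch, num_tokens, hidden_size]
    print(len(features[0]), len(features[0][0]))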
def get_highest_set_bit_position(number: int) -> int:
    """
    Return the 1-based position of the highest set bit of a number.

    >>> get_highest_set_bit_position(25)
    5
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
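# Cross-check (not in the original file): for non-negative integers the position of the
# highest set bit equals Python's built-in int.bit_length().
if __name__ == "__main__":
    assert get_highest_set_bit_position(25) == (25).bit_length() == 5  # 25 = 0b11001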
from __future__ import annotations

import queue


class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree():
    """Interactively build a binary tree in level order from user input."""
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("Input queue emptied before the tree was fully built")


def pre_order(node: TreeNode) -> None:
    """Pre-order traversal: root, left subtree, right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """In-order traversal: left subtree, root, right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Post-order traversal: left subtree, right subtree, root."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    """Breadth-first traversal using a FIFO queue."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Breadth-first traversal that prints each level on its own line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    """Return `s` centered in a banner of `width` characters."""
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
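# Non-interactive usage sketch (not in the original file): build a small tree directly
# instead of via input() so the traversals can be exercised without a terminal session.
if __name__ == "__main__":
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left, root.left.right = TreeNode(4), TreeNode(5)
    pre_order(root)  # prints: 1,2,4,5,3,
    print()
    in_order(root)  # prints: 4,2,5,1,3,
    print()
    level_order(root)  # prints: 1,2,3,4,5,
    print()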
def solution(limit: int = 1000000) -> int:
    """
    Return the number of reduced proper fractions with denominator <= limit,
    i.e. the sum of Euler's totient phi(k) for 2 <= k <= limit (Project Euler 72).
    """
    phi = [i - 1 for i in range(limit + 1)]  # phi[p] = p - 1 holds exactly when p is prime

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime; sieve its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
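# Small-limit sanity check (not in the original file): for limit=8 the totients are
# phi(2..8) = 1, 2, 2, 4, 2, 6, 4, which sum to 21 -- the number of reduced proper
# fractions with denominator <= 8.
if __name__ == "__main__":
    assert solution(8) == 21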
import os
from glob import glob

import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn

from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil


class ProcessorGradientFlow:
    """
    Wraps the CLIP processor so gradients can flow through the image-processing step:
    the text side uses the normal tokenizer, while images stay torch tensors and are
    resized / cropped / normalized with differentiable torchvision transforms.
    """

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True) -> None:
        """Assemble the saved intermediate frames into an animated gif."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a transform vector to the base latent and return the decoded image."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for _ in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log({"Original Image": wandb.Image(image)})

    def process_prompts(self, prompts):
        """Parse "prompt:weight" strings, (prompt, weight) pairs, or "|"-separated lists."""
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Optimize a latent transform so the decoded image matches the prompts."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iteration, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iteration:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iteration:03d}_final.png"))
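# Usage sketch (hedged, not in the original file): paths and prompts below are
# placeholders, and `load_vqgan` must be able to locate a VQGAN config/checkpoint for
# this to run, so the sketch is left commented out.
#
#   editor = VQGAN_CLIP(iterations=20, lr=0.01)
#   editor.generate(
#       pos_prompts="a smiling face:1.0 | bright eyes:0.5",  # "prompt:weight", "|"-separated
#       neg_prompts="a frowning face",
#       image_path="./input.png",
#       save_intermediate=True,  # keep frames so make_animation() can build a gif
#   )
#   editor.make_animation()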
def ugly_numbers(n: int) -> int:
    """
    Return the nth ugly number, i.e. the nth positive integer whose only prime
    factors are 2, 3 and 5.

    >>> ugly_numbers(10)
    12
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
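# Worked example (not in the original file): the algorithm merges the three candidate
# streams 2*u, 3*u, 5*u over already-generated ugly numbers u, always taking the
# smallest candidate, which yields 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...
if __name__ == "__main__":
    assert ugly_numbers(10) == 12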
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    """Wraps several ControlNets so their residuals are summed in one forward pass."""

    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory,
        is_main_process=True,
        save_function=None,
        safe_serialization=False,
        variant=None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
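# Usage sketch (based on the public diffusers API, not part of this file): a
# StableDiffusionControlNetPipeline accepts a list of ControlNets and wraps them in
# MultiControlNetModel, so every denoising step sums their residuals as in forward()
# above. Running this downloads several GB of weights, so it is left commented out.
#
#   from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
#   controlnets = [
#       ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny"),
#       ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose"),
#   ]
#   pipe = StableDiffusionControlNetPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", controlnet=controlnets
#   )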
from ..utils import DummyObject, requires_backends

# NOTE: the concrete class names in this dummy-objects module were destroyed by the
# dataset's identifier obfuscation; the placeholder names below (FlaxDummy1, ...) stand
# in for the original Flax model/scheduler names. Each stub's structure is intact: any
# instantiation or factory call raises unless the "flax" backend is installed.


class FlaxDummy1(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummy2(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummy3(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummy4(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummy5(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummy6(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummy7(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummy8(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummy9(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummy10(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummy11(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummy12(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDummy13(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
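# How the pattern works (a minimal sketch of the assumed DummyObject metaclass; the real
# implementation lives in diffusers' utils and may differ in detail). Kept as comments so
# it does not shadow the imported DummyObject: the metaclass intercepts attribute access
# on the class itself, so even `SomeDummy.any_attr` raises an ImportError that tells the
# user which backend to install.
#
#   class DummyObject(type):
#       def __getattr__(cls, name):
#           requires_backends(cls, cls._backends)  # raises ImportError("... requires flax ...")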
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
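# Usage sketch (not in the original file; values are illustrative): the extractor
# consumes raw mono audio at 44100 Hz and returns fixed-size log-mel patches plus a
# padding mask. This runs locally with numpy only.
if __name__ == "__main__":
    extractor = TvltFeatureExtractor()
    audio = np.random.randn(44100).astype(np.float32)  # one second of audio
    batch = extractor(audio, sampling_rate=44100, return_tensors="np")
    # batch["audio_values"] has shape (1, 1, time, 128); batch["audio_mask"] marks real patches
    print(batch["audio_values"].shape, batch["audio_mask"].shape)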
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the pair of numbers that represents the given letter in the Polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at position (index1, index2) in the Polybius square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Return the encoded version of the message according to the Bifid cipher."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")  # the 5x5 square folds "j" into "i"

        # Calculate the row/column numbers for every letter
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Flatten row-major and re-pair the digits into letters
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Return the decoded version of the message according to the Bifid cipher."""
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
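# Round-trip sanity check (not in the original file): decode() inverts encode(). Note the
# cipher folds "j" into "i" and strips spaces, so only the normalized message survives
# the round trip.
if __name__ == "__main__":
    cipher = BifidCipher()
    assert cipher.decode(cipher.encode("testmessage")) == "testmessage"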
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    """Prim's algorithm must return every edge of the known minimum spanning tree."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
a__: Union[str, Any] = StableDiffusionXLImgaImgPipeline
a__: Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
a__: str = PipelineTesterMixin.required_optional_params - {'''latents'''}
a__: List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a__: Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
a__: int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase__ ( self ):
torch.manual_seed(0 )
lowerCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCamelCase_ = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
torch.manual_seed(0 )
lowerCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , )
lowerCamelCase_ = CLIPTextModel(UpperCAmelCase )
lowerCamelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=UpperCAmelCase )
lowerCamelCase_ = CLIPTextModelWithProjection(UpperCAmelCase )
lowerCamelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=UpperCAmelCase )
lowerCamelCase_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=0 ):
lowerCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
lowerCamelCase_ = image / 2 + 0.5
if str(UpperCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ = torch.manual_seed(UpperCAmelCase )
else:
lowerCamelCase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowerCamelCase_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.7_5,
}
return inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionXLImgaImgPipeline(**UpperCAmelCase )
lowerCamelCase_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowerCamelCase_ = self.get_dummy_inputs(UpperCAmelCase )
lowerCamelCase_ = sd_pipe(**UpperCAmelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase__ ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def UpperCAmelCase__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = StableDiffusionXLImgaImgPipeline(**UpperCAmelCase )
lowerCamelCase_ = sd_pipe.to(UpperCAmelCase )
lowerCamelCase_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
# forward without prompt embeds
lowerCamelCase_ = self.get_dummy_inputs(UpperCAmelCase )
lowerCamelCase_ = 3 * ['this is a negative prompt']
lowerCamelCase_ = negative_prompt
lowerCamelCase_ = 3 * [inputs['prompt']]
lowerCamelCase_ = sd_pipe(**UpperCAmelCase )
lowerCamelCase_ = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCamelCase_ = self.get_dummy_inputs(UpperCAmelCase )
lowerCamelCase_ = 3 * ['this is a negative prompt']
lowerCamelCase_ = 3 * [inputs.pop('''prompt''' )]
(
lowerCamelCase_
) = sd_pipe.encode_prompt(UpperCAmelCase , negative_prompt=UpperCAmelCase )
lowerCamelCase_ = sd_pipe(
**UpperCAmelCase , prompt_embeds=UpperCAmelCase , negative_prompt_embeds=UpperCAmelCase , pooled_prompt_embeds=UpperCAmelCase , negative_pooled_prompt_embeds=UpperCAmelCase , )
lowerCamelCase_ = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase="cpu" , UpperCAmelCase=torch.floataa , UpperCAmelCase=0 ):
lowerCamelCase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowerCamelCase_ = np.random.RandomState(UpperCAmelCase ).standard_normal((1, 4, 64, 64) )
lowerCamelCase_ = torch.from_numpy(UpperCAmelCase ).to(device=UpperCAmelCase , dtype=UpperCAmelCase )
lowerCamelCase_ = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowerCamelCase_ = self.get_inputs(UpperCAmelCase )
lowerCamelCase_ = pipe(**UpperCAmelCase ).images
lowerCamelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7e-3
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : int = StableDiffusionDiffEditPipeline
A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
A : str = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A : Union[str, Any] = frozenset([] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, )
SCREAMING_SNAKE_CASE : int = DDIMScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, )
SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE : int = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Dict = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Any = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not hasattr(self.pipeline_class, '_optional_components' ):
return
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A, A, A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0]
SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max()
self.assertLess(A, 1E-4 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 'cpu'
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A )
SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape, (1, 16, 16) )
SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 )
SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
self.assertEqual(mask[0, -3, -4], 0 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A )
SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A )
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) )
SCREAMING_SNAKE_CASE : List[str] = raw_image
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears'
SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents
SCREAMING_SNAKE_CASE : List[str] = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : List[Any] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25, ).latents
        image = pipe(
            prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy", ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
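
# The end-to-end DiffEdit flow exercised by the slow tests above, as a standalone
# sketch (checkpoint and prompts are the ones used in the tests; `image` stands in
# for any 768x768 RGB PIL image you supply):
# pipe = StableDiffusionDiffEditPipeline.from_pretrained("stabilityai/stable-diffusion-2-1")
# mask = pipe.generate_mask(image=image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
# latents = pipe.invert(prompt="a bowl of fruit", image=image, inpaint_strength=0.7).latents
# edited = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7).images[0]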
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |  7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ---- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'  | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de'| '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
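
# A minimal usage sketch (assumes network access to the "facebook/mbart-large-en-ro"
# checkpoint referenced in PRETRAINED_VOCAB_FILES_MAP above):
# tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
# batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# MBart appends the language code as a suffix: input_ids end with [..., eos_token_id, lang_code_id].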
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    """
    Sum Euler's totient phi(d) for 2 <= d <= limit, i.e. the number of reduced
    proper fractions n/d with d <= limit (Project Euler-style counting).
    """
    # Totient sieve: start with phi[i] = i - 1; if phi[i] is still i - 1 when the
    # sweep reaches i, then i is prime, so every multiple j loses phi[j] // i,
    # which realises phi(j) = j * prod(1 - 1/p) over the primes p dividing j.
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
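    # Sanity check on a small limit: the totients of 2..8 are 1, 2, 2, 4, 2, 6, 4,
    # so solution(8) == 21 (the classic Project Euler example for d <= 8).
    # print(solution(8))  # -> 21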
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Create a state space tree and iterate through each branch using DFS,
    printing one permutation per leaf."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
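
# For comparison (not part of the original module), the standard library yields the
# same orderings lazily via itertools:
from itertools import permutations

for permutation in permutations(sequence):
    print(list(permutation))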
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(
'sequence builders', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.'
SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A, A )
SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A, A )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A, A )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE : Optional[int] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A )
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Tuple = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A, A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), )
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
        # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['trim_offsets'], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}"
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
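
# A minimal usage sketch (the checkpoint is the one exercised by the slow test above):
# from transformers import LongformerTokenizer
# tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
# print(tok.tokenize(" lower newer"))  # byte-level BPE: 'Ġ' marks a leading space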
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
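
# A minimal usage sketch via the pipeline factory (the checkpoint name is an
# illustrative assumption, not something this module pins down):
# from transformers import pipeline
# classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
# print(classifier("path/to/image.png", top_k=3))  # [{"score": ..., "label": ...}, ...]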
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_2_base(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
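
# A minimal usage sketch of the pipeline under test (the checkpoint name is an
# assumption; any SDXL checkpoint loads the same way):
# pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
# edited = pipe(prompt="a photo of a castle", image=init_image, strength=0.75).images[0]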
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Outline the current triangle, then recurse into the three corner
    sub-triangles until depth reaches 0."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
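    # Note: depth d outlines 3**d smallest triangles, so runtime grows
    # exponentially; e.g. `python fractals.py 4` is a quick demo setting.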
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        """Pick, per sample, the decode head (char/bpe/wp) with the highest confidence."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        """Decode char-head token ids into strings via the character tokenizer."""
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
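
# A minimal usage sketch (checkpoint name and model class are assumptions about the
# surrounding library, not defined in this file):
# from transformers import MgpstrForSceneTextRecognition
# processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
# model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
# pixel_values = processor(images=image, return_tensors="pt").pixel_values
# outputs = model(pixel_values)
# print(processor.batch_decode(outputs.logits)["generated_text"])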
from __future__ import annotations
import queue
class TreeNode:
    """A binary tree node with integer data and optional left/right children."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("unreachable: the loop always returns before the queue drains")
def pre_order(node: TreeNode) -> None:
    """Root -> left subtree -> right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """Left subtree -> root -> right subtree."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Left subtree -> right subtree -> root."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    """Breadth-first traversal: visit nodes level by level, left to right."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    """Breadth-first traversal printing each level on its own line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")
def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
    node: TreeNode = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 50 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
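    # A non-interactive alternative (hypothetical data): wire a small tree by hand
    # instead of answering build_tree()'s prompts, then traverse it:
    # root = TreeNode(1)
    # root.left, root.right = TreeNode(2), TreeNode(3)
    # pre_order(root)  # prints 1,2,3,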
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    # Target attributes follow SpeechT5HifiGan's module layout:
    # conv_pre, upsampler[i], resblocks[i].convs1/convs2[j], conv_post.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
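
# Typical invocation (all paths are illustrative):
# python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan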
'''simple docstring'''
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
UpperCAmelCase__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
UpperCAmelCase__ : List[Any] = parser.parse_args()
main(args.correct_filename, args.fail_filename)
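
# Typical invocation (file names are illustrative): each line of the correct file has
# the form "path/to/test.py;ClassName;test_name;expected_slice = np.array([...])":
# python overwrite_expected_slice.py --correct_filename correct_slices.txt --fail_filename failures.txt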
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Assign prev to head in order to put the old head at the end
        self.head = prev
def test_singly_linked_list() -> None:
"""simple docstring"""
    linked_list = LinkedList()
assert linked_list.is_empty() is True
assert str(__UpperCamelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__UpperCamelCase ) == i
linked_list.insert_nth(__UpperCamelCase ,i + 1 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 ,12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__UpperCamelCase ) == 9
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 ,10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 ,9 ) ) is True
for i in range(0 ,9 ):
        linked_list[i] = -i
assert all(linked_list[i] == -i for i in range(0 ,9 ) ) is True
linked_list.reverse()
assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(-8 ,1 ) )
def test_singly_linked_list_2() -> None:
"""simple docstring"""
    test_input = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_9_2.5_5_5_5_5,
'Hello, world!',
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
SCREAMING_SNAKE_CASE : Optional[int] = LinkedList()
for i in test_input:
linked_list.insert_tail(__UpperCamelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
SCREAMING_SNAKE_CASE : str = linked_list.delete_head()
assert result == -9
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
SCREAMING_SNAKE_CASE : Dict = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
SCREAMING_SNAKE_CASE : str = linked_list.delete_nth(10 )
assert result is None
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__UpperCamelCase )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__UpperCamelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 28 | 0 |
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Nearest-neighbour upsampling by a factor of 2, followed by a 3x3 conv.
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # A strided 3x3 conv halves the spatial resolution.
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # Projects the timestep embedding into the feature dimension.
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            # 1x1 conv that matches the residual's channel count to the output.
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
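# Minimal usage sketch (illustration only, not part of the module above).
# Flax modules are initialised with `init` and run with `apply`; inputs are
# NHWC and `temb` is a (batch, embed_dim) timestep embedding:
#
#   rng = jax.random.PRNGKey(0)
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#   hidden_states = jnp.ones((1, 8, 8, 32))
#   temb = jnp.ones((1, 128))
#   params = block.init(rng, hidden_states, temb)
#   out = block.apply(params, hidden_states, temb)  # (1, 8, 8, 64), via the 1x1 shortcut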
| 604 |
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import YolosImageProcessor


class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height and width expected from the processor's shortest-edge resize."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
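    # Worked example for the single-image branch above (illustration only):
    # for a 640x480 image, PIL reports (w, h) = (640, 480), so w > h. With a
    # shortest_edge of 800 the height becomes 800 and the width becomes
    # int(800 * 640 / 480) = 1066, matching the (800, 1066) shapes asserted
    # in the slow integration tests below.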
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_2.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
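# Minimal usage sketch for the processor under test (illustration only, not
# part of the test file above):
#
#   from PIL import Image
#   from transformers import YolosImageProcessor
#
#   processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
#   image = Image.open("cats.png")  # hypothetical local file
#   inputs = processor(images=image, return_tensors="pt")
#   # pixel_values is (1, 3, H, W) after the shortest-edge resize and padding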
| 28 | 0 |
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


# Disable TF32 matmuls for bit-exact comparisons (the usual diffusers test setup).
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
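# Note (illustration, not from the test file): `dual_guided` conditions on both
# a text prompt and an image prompt; `text_to_image_strength=0.75` weights the
# text condition at 0.75 against 0.25 for the image. `text_to_image` and
# `image_variation` run the same checkpoint in single-modality modes.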
| 33 |
from typing import List, Optional, TypeVar

from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal


logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets into a single dataset, alternating between the sources to pick examples."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Convert a list of datasets with the same schema into a single dataset."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
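# Minimal usage sketch (illustration only, not part of the module above):
#
#   from datasets import Dataset, interleave_datasets
#
#   ds1 = Dataset.from_dict({"text": ["a", "b", "c"]})
#   ds2 = Dataset.from_dict({"text": ["x", "y"]})
#   mixed = interleave_datasets(
#       [ds1, ds2], probabilities=[0.5, 0.5], seed=42, stopping_strategy="all_exhausted"
#   )
#   # samples alternate randomly between ds1 and ds2 until both are exhausted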
| 28 | 0 |
import inspect
import unittest
import warnings
from math import ceil, floor

from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )
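    # Worked example for the shape check above (defaults: image_size=64,
    # kernel_size=3, stride=2, padding=1): four stride-2 convs map
    # 64 -> 32 -> 16 -> 8 -> 4, so the patch embedding yields a 4x4 token grid.
    # The two "Subsample" stages then halve each side again, which the `/ 4`
    # encodes: ceil(4/4) * ceil(4/4) = 1 token of width hidden_sizes[-1] = 384.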
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 478 |
import inspect
import unittest

from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
    from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
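    # Worked example for the check above (defaults: image_size=32, output_stride=32):
    # the five feature maps have spatial sizes 32/2=16, 32/4=8, 32/8=4, 32/16=2 and
    # 32/32=1; after the loop `divisor` is 64, so `divisor // 2` recovers the
    # configured output_stride of 32.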
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 28 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
_SCREAMING_SNAKE_CASE = """CompVis/stable-diffusion-v1-1"""
_SCREAMING_SNAKE_CASE = """CompVis/stable-diffusion-v1-2"""
_SCREAMING_SNAKE_CASE = """CompVis/stable-diffusion-v1-3"""
_SCREAMING_SNAKE_CASE = """CompVis/stable-diffusion-v1-4"""
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
def __init__( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : int = True , ):
"""simple docstring"""
super()._init_()
UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
UpperCamelCase = StableDiffusionPipeline(
vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=lowerCamelCase_ , requires_safety_checker=lowerCamelCase_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
return {k: getattr(self , lowerCamelCase_ ) for k in self.config.keys() if not k.startswith("""_""" )}
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : int = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
self.enable_attention_slicing(lowerCamelCase_ )
@torch.no_grad()
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int] = 512 , lowerCamelCase_ : Any = 512 , lowerCamelCase_ : Optional[int] = 50 , lowerCamelCase_ : Dict = 7.5 , lowerCamelCase_ : str = None , lowerCamelCase_ : Any = 1 , lowerCamelCase_ : Optional[int] = 0.0 , lowerCamelCase_ : Union[str, Any] = None , lowerCamelCase_ : Optional[Any] = None , lowerCamelCase_ : Optional[Any] = "pil" , lowerCamelCase_ : Optional[Any] = True , lowerCamelCase_ : int = None , lowerCamelCase_ : Optional[int] = 1 , **lowerCamelCase_ : List[Any] , ):
"""simple docstring"""
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict = 512 , lowerCamelCase_ : int = 512 , lowerCamelCase_ : Union[str, Any] = 50 , lowerCamelCase_ : Union[str, Any] = 7.5 , lowerCamelCase_ : Union[str, Any] = None , lowerCamelCase_ : str = 1 , lowerCamelCase_ : Optional[int] = 0.0 , lowerCamelCase_ : Tuple = None , lowerCamelCase_ : Tuple = None , lowerCamelCase_ : int = "pil" , lowerCamelCase_ : Dict = True , lowerCamelCase_ : Any = None , lowerCamelCase_ : Any = 1 , **lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any] = 512 , lowerCamelCase_ : List[Any] = 512 , lowerCamelCase_ : Optional[int] = 50 , lowerCamelCase_ : str = 7.5 , lowerCamelCase_ : str = None , lowerCamelCase_ : Dict = 1 , lowerCamelCase_ : int = 0.0 , lowerCamelCase_ : Tuple = None , lowerCamelCase_ : Tuple = None , lowerCamelCase_ : Any = "pil" , lowerCamelCase_ : str = True , lowerCamelCase_ : Dict = None , lowerCamelCase_ : List[Any] = 1 , **lowerCamelCase_ : int , ):
"""simple docstring"""
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int = 512 , lowerCamelCase_ : Dict = 512 , lowerCamelCase_ : Dict = 50 , lowerCamelCase_ : Optional[int] = 7.5 , lowerCamelCase_ : Any = None , lowerCamelCase_ : int = 1 , lowerCamelCase_ : List[Any] = 0.0 , lowerCamelCase_ : Tuple = None , lowerCamelCase_ : int = None , lowerCamelCase_ : str = "pil" , lowerCamelCase_ : str = True , lowerCamelCase_ : List[Any] = None , lowerCamelCase_ : Union[str, Any] = 1 , **lowerCamelCase_ : str , ):
"""simple docstring"""
return self.pipea(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
@torch.no_grad()
def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int] = 512 , lowerCamelCase_ : List[Any] = 512 , lowerCamelCase_ : Optional[Any] = 50 , lowerCamelCase_ : List[Any] = 7.5 , lowerCamelCase_ : Optional[Any] = None , lowerCamelCase_ : Optional[int] = 1 , lowerCamelCase_ : str = 0.0 , lowerCamelCase_ : int = None , lowerCamelCase_ : Tuple = None , lowerCamelCase_ : Tuple = "pil" , lowerCamelCase_ : str = True , lowerCamelCase_ : Optional[Any] = None , lowerCamelCase_ : List[Any] = 1 , **lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
self.to(lowerCamelCase_ )
# Check that the height and width are divisible by 8
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCamelCase = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCamelCase = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCamelCase = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCamelCase = self.textaimg_sda_a(
prompt=lowerCamelCase_ , height=lowerCamelCase_ , width=lowerCamelCase_ , num_inference_steps=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , negative_prompt=lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , latents=lowerCamelCase_ , output_type=lowerCamelCase_ , return_dict=lowerCamelCase_ , callback=lowerCamelCase_ , callback_steps=lowerCamelCase_ , **lowerCamelCase_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
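# Hypothetical usage sketch (prompt text is illustrative): once assembled, the wrapper is
# called like any DiffusionPipeline, e.g.
#   output = pipe(prompt="an astronaut riding a horse", num_inference_steps=50)
# and output.images then holds one sample per checkpoint, v1.1 through v1.4.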
| 537 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"distilbert-base-uncased": 5_1_2,
"distilbert-base-uncased-distilled-squad": 5_1_2,
"distilbert-base-cased": 5_1_2,
"distilbert-base-cased-distilled-squad": 5_1_2,
"distilbert-base-german-cased": 5_1_2,
"distilbert-base-multilingual-cased": 5_1_2,
}
UpperCamelCase_ = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : List[Any] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
A : Optional[int] = ['''input_ids''', '''attention_mask''']
A : List[Any] = DistilBertTokenizer
def __init__( self, A=None, A=None, A=True, A="[UNK]", A="[SEP]", A="[PAD]", A="[CLS]", A="[MASK]", A=True, A=None, **A, ):
'''simple docstring'''
super().__init__(
A, tokenizer_file=A, do_lower_case=A, unk_token=A, sep_token=A, pad_token=A, cls_token=A, mask_token=A, tokenize_chinese_chars=A, strip_accents=A, **A, )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase', A ) != do_lower_case
or normalizer_state.get('strip_accents', A ) != strip_accents
or normalizer_state.get('handle_chinese_chars', A ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(A, normalizer_state.pop('type' ) )
SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE : List[str] = strip_accents
SCREAMING_SNAKE_CASE : List[str] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE : Dict = normalizer_class(**A )
SCREAMING_SNAKE_CASE : Union[str, Any] = do_lower_case
def UpperCamelCase_ ( self, A, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
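# e.g. for a sentence pair, the mask is len([CLS] + seq_a + [SEP]) zeros followed by
# len(seq_b + [SEP]) ones, matching the BERT-style segment ids built above.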
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(A, name=A )
return tuple(A )
| 28 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["""OwlViTFeatureExtractor"""]
A_ = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
UpperCamelCase_ = get_tests_dir("fixtures")
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = mock.Mock()
SCREAMING_SNAKE_CASE : List[Any] = 500
SCREAMING_SNAKE_CASE : Optional[Any] = {}
SCREAMING_SNAKE_CASE : Any = HTTPError
SCREAMING_SNAKE_CASE : Any = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=A ) as mock_head:
SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
# This checks that we did call the fake head request
mock_head.assert_called()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaises(A ):
# config is in subfolder, the following should not work without specifying the subfolder
SCREAMING_SNAKE_CASE : str = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' )
SCREAMING_SNAKE_CASE : Dict = AutoImageProcessor.from_pretrained(
'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor' )
self.assertIsNotNone(A )
@is_staging_test
class _a ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-image-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-image-processor' )
except HTTPError:
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('test-image-processor', use_auth_token=self._token )
SCREAMING_SNAKE_CASE : int = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A, repo_id='test-image-processor', push_to_hub=A, use_auth_token=self._token )
SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor.from_pretrained(F"{USER}/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = ViTImageProcessor.from_pretrained(A )
image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token )
SCREAMING_SNAKE_CASE : str = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-image-processor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
A, repo_id='valid_org/test-image-processor-org', push_to_hub=A, use_auth_token=self._token )
SCREAMING_SNAKE_CASE : Dict = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' )
for k, v in image_processor.__dict__.items():
self.assertEqual(A, getattr(A, A ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE : Tuple = CustomImageProcessor.from_pretrained(A )
image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'}, )
SCREAMING_SNAKE_CASE : Optional[int] = AutoImageProcessor.from_pretrained(
F"{USER}/test-dynamic-image-processor", trust_remote_code=A )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor' )
| 28 | 0 |
import os
import sys
import unittest
A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A = os.path.join(git_repo_path, "src", "diffusers")
class lowercase__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ = find_backend(" if not is_torch_available():" )
self.assertEqual(_lowercase , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
UpperCAmelCase__ = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(_lowercase , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
UpperCAmelCase__ = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(_lowercase , "torch_and_transformers_and_onnx" )
def _UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ = read_init()
# We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn("torch" , _lowercase )
self.assertIn("torch_and_transformers" , _lowercase )
self.assertIn("flax_and_transformers" , _lowercase )
self.assertIn("torch_and_transformers_and_onnx" , _lowercase )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = create_dummy_object("CONSTANT" , "\'torch\'" )
self.assertEqual(_lowercase , "\nCONSTANT = None\n" )
UpperCAmelCase__ = create_dummy_object("function" , "\'torch\'" )
self.assertEqual(
_lowercase , "\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n" )
UpperCAmelCase__ = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, \'torch\')\n'
UpperCAmelCase__ = create_dummy_object("FakeClass" , "\'torch\'" )
self.assertEqual(_lowercase , _lowercase )
def _UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, ["torch"])\n'
UpperCAmelCase__ = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , _lowercase )
| 475 |
'''simple docstring'''
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = val
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if self.val:
if val < self.val:
if self.left is None:
SCREAMING_SNAKE_CASE : Optional[int] = Node(A )
else:
self.left.insert(A )
elif val > self.val:
if self.right is None:
SCREAMING_SNAKE_CASE : int = Node(A )
else:
self.right.insert(A )
else:
SCREAMING_SNAKE_CASE : int = val
def lowercase__( __UpperCamelCase: Optional[int] ,__UpperCamelCase: List[str] ):
"""simple docstring"""
if root:
inorder(root.left ,__UpperCamelCase )
res.append(root.val )
inorder(root.right ,__UpperCamelCase )
def lowercase__( __UpperCamelCase: List[Any] ):
"""simple docstring"""
if len(__UpperCamelCase ) == 0:
return arr
SCREAMING_SNAKE_CASE : Optional[int] = Node(arr[0] )
for i in range(1 ,len(__UpperCamelCase ) ):
root.insert(arr[i] )
# Traverse BST in order.
SCREAMING_SNAKE_CASE : Dict = []
inorder(__UpperCamelCase ,__UpperCamelCase )
return res
if __name__ == "__main__":
print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
| 28 | 0 |
snake_case__ : List[str] = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def lowercase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
UpperCAmelCase__ = set()
# keep track of all the paths to be checked
UpperCAmelCase__ = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
UpperCAmelCase__ = queue.pop(0 )
# get the last node from the path
UpperCAmelCase__ = path[-1]
if node not in explored:
UpperCAmelCase__ = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
UpperCAmelCase__ = list(__UpperCamelCase )
new_path.append(__UpperCamelCase )
queue.append(__UpperCamelCase )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(__UpperCamelCase )
# in case there's no path between the 2 nodes
return []
def lowercase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
UpperCAmelCase__ = [start]
UpperCAmelCase__ = set(__UpperCamelCase )
# Keep tab on distances from `start` node.
UpperCAmelCase__ = {start: 0, target: -1}
while queue:
UpperCAmelCase__ = queue.pop(0 )
if node == target:
UpperCAmelCase__ = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(__UpperCamelCase )
queue.append(__UpperCamelCase )
UpperCAmelCase__ = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 392 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def lowercase__( *__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: Optional[Union[Dict, Any]] = None ,__UpperCamelCase: Dict=True ,__UpperCamelCase: List[Any]=2 ):
"""simple docstring"""
from .. import __version__
SCREAMING_SNAKE_CASE : int = take_from
SCREAMING_SNAKE_CASE : Optional[int] = ()
if not isinstance(args[0] ,__UpperCamelCase ):
SCREAMING_SNAKE_CASE : List[str] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse(__UpperCamelCase ):
raise ValueError(
f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
f" version {__version__} is >= {version_name}" )
SCREAMING_SNAKE_CASE : Tuple = None
if isinstance(__UpperCamelCase ,__UpperCamelCase ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__UpperCamelCase ),)
SCREAMING_SNAKE_CASE : Dict = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
elif hasattr(__UpperCamelCase ,__UpperCamelCase ):
values += (getattr(__UpperCamelCase ,__UpperCamelCase ),)
SCREAMING_SNAKE_CASE : Optional[int] = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
SCREAMING_SNAKE_CASE : Dict = f"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
SCREAMING_SNAKE_CASE : Dict = warning + ' ' if standard_warn else ''
warnings.warn(warning + message ,__UpperCamelCase ,stacklevel=__UpperCamelCase )
if isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) > 0:
SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1]
SCREAMING_SNAKE_CASE : Any = call_frame.filename
SCREAMING_SNAKE_CASE : Tuple = call_frame.lineno
SCREAMING_SNAKE_CASE : Union[str, Any] = call_frame.function
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
if len(__UpperCamelCase ) == 0:
return
elif len(__UpperCamelCase ) == 1:
return values[0]
return values
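# Hypothetical usage sketch (names are illustrative; in the original diffusers helper the
# dict parameter is `take_from` and the warning class is FutureWarning):
#   kwargs = {"scale": 0.5}
#   value = deprecate("scale", "99.0.0", "Use `rescale` instead.", take_from=kwargs)
# Below version 99.0.0 this pops "scale" from kwargs, warns, and returns 0.5; at or past
# that version the helper raises ValueError instead.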
| 28 | 0 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a =logging.get_logger(__name__)
a ={"""vocab_file""": """spiece.model"""}
a ={
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
a ={
"""AI-Sweden/gpt-sw3-126m""": 2048,
"""AI-Sweden/gpt-sw3-350m""": 2048,
"""AI-Sweden/gpt-sw3-1.6b""": 2048,
"""AI-Sweden/gpt-sw3-6.7b""": 2048,
"""AI-Sweden/gpt-sw3-20b""": 2048,
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : int = VOCAB_FILES_NAMES
_UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : List[Any]=False ,SCREAMING_SNAKE_CASE__ : Dict=False ,SCREAMING_SNAKE_CASE__ : Dict=False ,SCREAMING_SNAKE_CASE__ : Any=None ,SCREAMING_SNAKE_CASE__ : int=None ,SCREAMING_SNAKE_CASE__ : List[str]=None ,SCREAMING_SNAKE_CASE__ : Tuple=None ,SCREAMING_SNAKE_CASE__ : Tuple = None ,**SCREAMING_SNAKE_CASE__ : Any ,):
__lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__lowerCamelCase : Optional[Any] = kwargs.get('name_or_path')
if name_or_path is None:
logger.warning(
'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'
' if you are testing the model, this can safely be ignored')
__lowerCamelCase : Dict = 'None'
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__lowerCamelCase : Optional[Any] = '<|endoftext|>' if eos_token is None else eos_token
__lowerCamelCase : int = '<unk>' if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__lowerCamelCase : Dict = unk_token if pad_token is None else pad_token
__lowerCamelCase : List[Any] = eos_token if bos_token is None else bos_token
else:
__lowerCamelCase : Optional[int] = '<pad>' if pad_token is None else pad_token
__lowerCamelCase : int = '<s>' if bos_token is None else bos_token
super().__init__(
do_lower_case=SCREAMING_SNAKE_CASE__ ,remove_space=SCREAMING_SNAKE_CASE__ ,keep_accents=SCREAMING_SNAKE_CASE__ ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Tuple = do_lower_case
__lowerCamelCase : Optional[Any] = remove_space
__lowerCamelCase : List[Any] = keep_accents
__lowerCamelCase : Tuple = vocab_file
__lowerCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(SCREAMING_SNAKE_CASE__)
# Used for whitespace normalization in input texts
# fmt: off
__lowerCamelCase : Any = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', ''}
# fmt: on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__lowerCamelCase : str = re.compile(
F"[{''.join(map(SCREAMING_SNAKE_CASE__ ,list(range(0 ,9)) + list(range(1_1 ,3_2)) + list(range(1_2_7 ,1_6_0)) + [1_6_0, 1_7_3, 8_2_0_3]))}]")
def __getstate__( self : str):
__lowerCamelCase : str = self.__dict__.copy()
__lowerCamelCase : Optional[int] = None
return state
def __setstate__( self : List[str] ,SCREAMING_SNAKE_CASE__ : Any):
__lowerCamelCase : Dict = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs'):
__lowerCamelCase : Optional[Any] = {}
__lowerCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCAmelCase ( self : Any):
return len(self.sp_model)
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[int]):
__lowerCamelCase : List[str] = self.non_printing_characters_re.sub('' ,SCREAMING_SNAKE_CASE__)
# Normalize whitespaces
__lowerCamelCase : Any = ''.join([char if char not in self.whitespaces else ' ' for char in text])
# NFC Unicode normalization
__lowerCamelCase : List[str] = unicodedata.normalize('NFC' ,SCREAMING_SNAKE_CASE__)
return text
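# e.g. any character from `self.whitespaces` collapses to a plain " ", and NFC unifies
# composed vs. decomposed accents ("é" vs. "e" + U+0301) into a single form.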
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Optional[int] ,**SCREAMING_SNAKE_CASE__ : List[Any]):
__lowerCamelCase : List[str] = self.preprocess_text(SCREAMING_SNAKE_CASE__)
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Dict):
return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__)
@staticmethod
def lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : str):
return out_string
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Dict):
__lowerCamelCase : Tuple = []
__lowerCamelCase : str = ''
__lowerCamelCase : Optional[int] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__) + token
__lowerCamelCase : str = True
__lowerCamelCase : List[str] = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__)
return out_string
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : List[str] = None):
if not os.path.isdir(SCREAMING_SNAKE_CASE__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
__lowerCamelCase : Any = os.path.join(
SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__)
elif not os.path.isfile(self.vocab_file):
with open(SCREAMING_SNAKE_CASE__ ,'wb') as fi:
__lowerCamelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__)
return (out_vocab_file,)
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Any = False):
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
__lowerCamelCase : Optional[int] = self.preprocess_text(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = self.sp_model.encode(SCREAMING_SNAKE_CASE__)
else:
__lowerCamelCase : List[Any] = [self.preprocess_text(SCREAMING_SNAKE_CASE__) for t in text]
__lowerCamelCase : Optional[Any] = self.sp_model.encode(SCREAMING_SNAKE_CASE__)
if return_tensors is True or return_tensors == "pt":
__lowerCamelCase : Union[str, Any] = torch.tensor(SCREAMING_SNAKE_CASE__)
return token_ids
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : str):
return self.sp_model.decode(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : Union[str, Any]):
__lowerCamelCase : str = [F"User: {text}" if is_user else F"Bot: {text}" for is_user, text in conversation.iter_texts()]
__lowerCamelCase : Any = (
F"{self.eos_token}{self.bos_token}" + F"{self.bos_token}".join(SCREAMING_SNAKE_CASE__) + F"{self.bos_token}Bot:"
)
return self.encode(text=SCREAMING_SNAKE_CASE__)
| 652 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 | 0 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
_lowercase = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase_ ( A ):
def __init__( self , *__A , __A=None , __A=None , __A=None , **__A ) -> Optional[int]:
super().__init__(*__A , **__A )
SCREAMING_SNAKE_CASE_ : Optional[Any] =eval_examples
SCREAMING_SNAKE_CASE_ : List[Any] =post_process_function
SCREAMING_SNAKE_CASE_ : List[Any] =quant_trainer_args
SCREAMING_SNAKE_CASE_ : Tuple =128 # default number of calibration samples
def _snake_case ( self , __A=None ) -> str:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('''Trainer: calibration requires a calib_dataset.''' )
SCREAMING_SNAKE_CASE_ : List[str] =calib_dataset if calib_dataset is not None else self.calib_dataset
SCREAMING_SNAKE_CASE_ : Tuple =self._remove_unused_columns(__A , description='''Calibration''' )
return DataLoader(
__A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=__A , )
def _snake_case ( self , __A=None ) -> Tuple:
SCREAMING_SNAKE_CASE_ : Optional[int] =self.train_dataset if calib_dataset is None else calib_dataset
SCREAMING_SNAKE_CASE_ : Any =self.get_calib_dataloader(__A )
SCREAMING_SNAKE_CASE_ : List[Any] =self.model
quant_trainer.configure_model(__A , self.quant_trainer_args , calib=__A )
model.eval()
quant_trainer.enable_calibration(__A )
logger.info('''***** Running calibration *****''' )
logger.info(F' Num examples = {self.calib_num}' )
logger.info(F' Batch size = {calib_dataloader.batch_size}' )
for step, inputs in enumerate(__A ):
# Prediction step
SCREAMING_SNAKE_CASE_ : List[str] =self.prediction_step(__A , __A , prediction_loss_only=__A )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(__A , self.quant_trainer_args )
SCREAMING_SNAKE_CASE_ : List[Any] =model
def _snake_case ( self , __A=None , __A=None , __A=None , __A = "eval" ) -> str:
SCREAMING_SNAKE_CASE_ : Optional[int] =self.eval_dataset if eval_dataset is None else eval_dataset
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.get_eval_dataloader(__A )
SCREAMING_SNAKE_CASE_ : Dict =self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE_ : int =self.compute_metrics
SCREAMING_SNAKE_CASE_ : Optional[int] =None
SCREAMING_SNAKE_CASE_ : List[Any] =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE_ : List[str] =eval_loop(
__A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , )
finally:
SCREAMING_SNAKE_CASE_ : List[str] =compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
SCREAMING_SNAKE_CASE_ : Tuple =self.post_process_function(__A , __A , output.predictions )
SCREAMING_SNAKE_CASE_ : Optional[int] =self.compute_metrics(__A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'{metric_key_prefix}_' ):
SCREAMING_SNAKE_CASE_ : List[Any] =metrics.pop(__A )
self.log(__A )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] ={}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
SCREAMING_SNAKE_CASE_ : Dict =self.callback_handler.on_evaluate(self.args , self.state , self.control , __A )
return metrics
def _snake_case ( self , __A , __A , __A=None , __A = "test" ) -> int:
SCREAMING_SNAKE_CASE_ : str =self.get_test_dataloader(__A )
# Temporarily disable metric computation, we will do it in the loop here.
SCREAMING_SNAKE_CASE_ : List[str] =self.compute_metrics
SCREAMING_SNAKE_CASE_ : str =None
SCREAMING_SNAKE_CASE_ : Dict =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
SCREAMING_SNAKE_CASE_ : int =eval_loop(
__A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__A , )
finally:
SCREAMING_SNAKE_CASE_ : Tuple =compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
SCREAMING_SNAKE_CASE_ : int =self.post_process_function(__A , __A , output.predictions , '''predict''' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.compute_metrics(__A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'{metric_key_prefix}_' ):
SCREAMING_SNAKE_CASE_ : Optional[int] =metrics.pop(__A )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__A )
def _snake_case ( self , __A="./" ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ : Tuple =self.eval_dataset
SCREAMING_SNAKE_CASE_ : int =self.get_eval_dataloader(__A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =next(iter(__A ) )
# saving device - to make it consistent
SCREAMING_SNAKE_CASE_ : Optional[Any] =torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
# convert to tuple
SCREAMING_SNAKE_CASE_ : Optional[int] =tuple(v.to(__A ) for k, v in batch.items() )
logger.info('''Converting model to be onnx compatible''' )
from pytorch_quantization.nn import TensorQuantizer
SCREAMING_SNAKE_CASE_ : List[str] =True
SCREAMING_SNAKE_CASE_ : Dict =self.model.to(__A )
model.eval()
model.float()
SCREAMING_SNAKE_CASE_ : int =model.module if hasattr(__A , '''module''' ) else model
quant_trainer.configure_model(__A , self.quant_trainer_args )
SCREAMING_SNAKE_CASE_ : Any =os.path.join(__A , '''model.onnx''' )
logger.info(F'exporting model to {output_model_file}' )
SCREAMING_SNAKE_CASE_ : List[str] ={0: 'batch_size', 1: 'seq_len'}
torch.onnx.export(
__A , __A , __A , export_params=__A , opset_version=13 , do_constant_folding=__A , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={
'''input_ids''': axes,
'''attention_mask''': axes,
'''token_type_ids''': axes,
'''output_start_logits''': axes,
'''output_end_logits''': axes,
} , verbose=__A , )
logger.info('''onnx export finished''' )
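# Hypothetical flow (method names follow the original quantization example; they are
# mangled above):
#   trainer.calibrate()                    # stream calib_num samples to collect activation ranges
#   metrics = trainer.evaluate()           # score the fake-quantized model
#   trainer.save_onnx(output_dir="./out")  # export an ONNX graph with dynamic batch/seq axes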
| 443 |
'''simple docstring'''
def lowercase__( __UpperCamelCase: int ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise TypeError('Input value must be an \'int\' type' )
SCREAMING_SNAKE_CASE : int = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
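# Illustrative check: 32 == 0b100000, so its highest set bit sits at (1-indexed) position 6.
assert lowercase__(32) == 6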
| 28 | 0 |
from __future__ import annotations
def lowercase_ ( _UpperCamelCase ):
'''simple docstring'''
return [ord(__UpperCamelCase ) - 96 for elem in plain]
def lowercase_ ( _UpperCamelCase ):
'''simple docstring'''
return "".join(chr(elem + 96 ) for elem in encoded )
def lowercase_ ( ):
'''simple docstring'''
__lowercase = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''' , __UpperCamelCase )
print('''Decoded:''' , decode(__UpperCamelCase ) )
if __name__ == "__main__":
main()
| 639 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self, A=None, A=None, A=None, **A ):
'''simple docstring'''
if tokenize_kwargs is None:
SCREAMING_SNAKE_CASE : Optional[int] = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
SCREAMING_SNAKE_CASE : Tuple = truncation
SCREAMING_SNAKE_CASE : int = tokenize_kwargs
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if return_tensors is not None:
SCREAMING_SNAKE_CASE : Optional[int] = return_tensors
return preprocess_params, {}, postprocess_params
def UpperCamelCase_ ( self, A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.framework
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(A, return_tensors=A, **A )
return model_inputs
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model(**A )
return model_outputs
def UpperCamelCase_ ( self, A, A=False ):
'''simple docstring'''
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self, *A, **A ):
'''simple docstring'''
return super().__call__(*A, **A )
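# Hypothetical usage sketch (model name is illustrative): instantiated through the
# factory, this pipeline returns raw hidden states rather than task predictions, e.g.
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("Hello world")  # nested list shaped [batch, tokens, hidden_size]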
| 28 | 0 |
'''simple docstring'''
def A ( UpperCamelCase_ : int ) -> List[str]:
'''simple docstring'''
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or number < 0:
raise ValueError("Input must be a non-negative integer" )
lowerCAmelCase__ = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
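# Kernighan's trick clears the lowest set bit on every pass (illustrative check):
assert A(13) == 3  # 0b1101 has three set bits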
| 48 |
'''simple docstring'''
from __future__ import annotations
import queue
class _a :
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = data
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : List[str] = None
def lowercase__( ):
"""simple docstring"""
print('\n********Press N to stop entering at any point of time********\n' )
SCREAMING_SNAKE_CASE : str = input('Enter the value of the root node: ' ).strip().lower()
SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue()
SCREAMING_SNAKE_CASE : Dict = TreeNode(int(__UpperCamelCase ) )
q.put(__UpperCamelCase )
while not q.empty():
SCREAMING_SNAKE_CASE : List[Any] = q.get()
SCREAMING_SNAKE_CASE : Optional[int] = f"Enter the left node of {node_found.data}: "
SCREAMING_SNAKE_CASE : Any = input(__UpperCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE : str = TreeNode(int(__UpperCamelCase ) )
SCREAMING_SNAKE_CASE : Any = left_node
q.put(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = f"Enter the right node of {node_found.data}: "
SCREAMING_SNAKE_CASE : Dict = input(__UpperCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
SCREAMING_SNAKE_CASE : Optional[int] = TreeNode(int(__UpperCamelCase ) )
SCREAMING_SNAKE_CASE : Any = right_node
q.put(__UpperCamelCase )
raise
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
print(node.data ,end=',' )
pre_order(node.left )
pre_order(node.right )
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
in_order(node.left )
print(node.data ,end=',' )
in_order(node.right )
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data ,end=',' )
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue()
q.put(__UpperCamelCase )
while not q.empty():
SCREAMING_SNAKE_CASE : Optional[int] = q.get()
print(node_dequeued.data ,end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
SCREAMING_SNAKE_CASE : queue.Queue = queue.Queue()
q.put(__UpperCamelCase )
while not q.empty():
SCREAMING_SNAKE_CASE : Union[str, Any] = []
while not q.empty():
SCREAMING_SNAKE_CASE : List[Any] = q.get()
print(node_dequeued.data ,end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__UpperCamelCase )
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
SCREAMING_SNAKE_CASE : list[TreeNode] = []
SCREAMING_SNAKE_CASE : Optional[Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data ,end=',' )
stack.append(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Any = n.left
# end of while means current node doesn't have left child
SCREAMING_SNAKE_CASE : List[Any] = stack.pop()
# start to traverse its right child
SCREAMING_SNAKE_CASE : Any = n.right
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
SCREAMING_SNAKE_CASE : list[TreeNode] = []
SCREAMING_SNAKE_CASE : int = node
while n or stack:
while n:
stack.append(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[Any] = n.left
SCREAMING_SNAKE_CASE : Tuple = stack.pop()
print(n.data ,end=',' )
SCREAMING_SNAKE_CASE : str = n.right
def lowercase__( __UpperCamelCase: TreeNode ):
"""simple docstring"""
if not isinstance(__UpperCamelCase ,__UpperCamelCase ) or not node:
return
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = [], []
SCREAMING_SNAKE_CASE : Optional[int] = node
stacka.append(__UpperCamelCase )
while stacka: # to find the reversed order of post order, store it in stack2
SCREAMING_SNAKE_CASE : Optional[int] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__UpperCamelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data ,end=',' )
def lowercase__( __UpperCamelCase: str = "" ,__UpperCamelCase: Dict=50 ,__UpperCamelCase: Optional[int]="*" ):
"""simple docstring"""
if not s:
return "\n" + width * char
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = divmod(width - len(__UpperCamelCase ) - 2 ,2 )
return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
UpperCamelCase_ = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 28 | 0 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class __lowercase ( yaml.SafeLoader ):
def __a ( self : Dict , __lowerCamelCase : Tuple ) -> str:
'''simple docstring'''
lowercase = [self.constructed_objects[key_node] for key_node, _ in node.value]
lowercase = [tuple(__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else key for key in keys]
lowercase = Counter(__lowerCamelCase )
lowercase = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'Got duplicate yaml keys: {duplicate_keys}' )
def __a ( self : str , __lowerCamelCase : int , __lowerCamelCase : Dict=False ) -> Optional[Any]:
'''simple docstring'''
lowercase = super().construct_mapping(__lowerCamelCase , deep=__lowerCamelCase )
self._check_no_duplicates_on_constructed_node(__lowerCamelCase )
return mapping
def __UpperCAmelCase ( UpperCAmelCase )-> Union[str, Any]:
"""simple docstring"""
lowercase = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
lowercase = full_content[1:].index('''---''' ) + 1
lowercase = '\n'.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(__UpperCamelCase )
class __lowercase ( _A ):
lowercase = {'''train_eval_index'''} # train-eval-index in the YAML metadata
@classmethod
def __a ( cls : Dict , __lowerCamelCase : Optional[Any] ) -> List[str]:
'''simple docstring'''
with open(__lowerCamelCase , encoding='''utf-8''' ) as readme_file:
lowercase = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(__lowerCamelCase )
else:
return cls()
def __a ( self : str , __lowerCamelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
if path.exists():
with open(__lowerCamelCase , encoding='''utf-8''' ) as readme_file:
lowercase = readme_file.read()
else:
lowercase = None
lowercase = self._to_readme(__lowerCamelCase )
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as readme_file:
readme_file.write(__lowerCamelCase )
def __a ( self : str , __lowerCamelCase : List[Any] = None ) -> List[Any]:
'''simple docstring'''
if readme_content is not None:
lowercase = _split_yaml_from_readme(__lowerCamelCase )
lowercase = '---\n' + self.to_yaml_string() + '---\n' + content
else:
lowercase = '---\n' + self.to_yaml_string() + '---\n'
return full_content
@classmethod
def __a ( cls : int , __lowerCamelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = yaml.load(__lowerCamelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
lowercase = {
(key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**__lowerCamelCase )
def __a ( self : str ) -> Dict:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=__lowerCamelCase , allow_unicode=__lowerCamelCase , encoding='''utf-8''' , ).decode('''utf-8''' )
A_ = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
A_ = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
A_ = ap.parse_args()
A_ = Path(args.readme_filepath)
A_ = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
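# Programmatic sketch of the same round trip (path is illustrative):
#     metadata = DatasetMetadata.from_readme(Path("README.md"))  # parse the YAML block
#     metadata.to_readme(Path("README.md"))                      # rewrite it in place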
| 604 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _a :
'''simple docstring'''
def __init__( self, A = "cpu", A = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = device
SCREAMING_SNAKE_CASE : Tuple = CLIPTokenizerFast.from_pretrained(A )
SCREAMING_SNAKE_CASE : int = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
SCREAMING_SNAKE_CASE : str = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
SCREAMING_SNAKE_CASE : Dict = torchvision.transforms.Normalize(self.image_mean, self.image_std )
SCREAMING_SNAKE_CASE : List[str] = torchvision.transforms.Resize(224 )
SCREAMING_SNAKE_CASE : List[Any] = torchvision.transforms.CenterCrop(224 )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.resize(A )
SCREAMING_SNAKE_CASE : Any = self.center_crop(A )
SCREAMING_SNAKE_CASE : str = self.normalize(A )
return images
def __call__( self, A=None, A=None, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.tokenizer(text=A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.preprocess_img(A )
SCREAMING_SNAKE_CASE : List[str] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device()
if vqgan:
SCREAMING_SNAKE_CASE : Optional[Any] = vqgan
else:
SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A )
self.vqgan.eval()
if clip:
SCREAMING_SNAKE_CASE : List[str] = clip
else:
SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = iterations
SCREAMING_SNAKE_CASE : Tuple = lr
SCREAMING_SNAKE_CASE : Tuple = log
SCREAMING_SNAKE_CASE : str = make_grid
SCREAMING_SNAKE_CASE : Dict = return_val
SCREAMING_SNAKE_CASE : Union[str, Any] = quantize
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = []
if output_path is None:
SCREAMING_SNAKE_CASE : int = './animation.gif'
if input_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = self.save_path
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) )
if not len(A ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(A ) == 1:
            print('Only one image found in save path (did you pass save_intermediate=True to the generate function?)' )
SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A )
SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A )
if extend_frames:
SCREAMING_SNAKE_CASE : List[str] = 1.5
SCREAMING_SNAKE_CASE : int = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(A ) )
imageio.mimsave(A, A, duration=A )
print(F"gif saved to {output_path}" )
def UpperCamelCase_ ( self, A=None, A=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device )
SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A )
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A )
return z
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_()
SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector
if self.quantize:
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent
return self.vqgan.decode(A )
def UpperCamelCase_ ( self, A, A, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A )
SCREAMING_SNAKE_CASE : str = self.clip(**A )
SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image
if weights is not None:
SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) )
if neg_prompts:
SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] )
else:
SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device )
SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A )
return loss
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A )
SCREAMING_SNAKE_CASE : Dict = loop_post_process(A )
SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A )
print('CLIP loss', A )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
wandb.init(reinit=A, project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
SCREAMING_SNAKE_CASE : Tuple = Image.open(A )
SCREAMING_SNAKE_CASE : int = image.resize((256, 256) )
            wandb.log({'Original Image': wandb.Image(A )} )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not prompts:
return []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Dict = []
if isinstance(A, A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(A, (tuple, list) ):
SCREAMING_SNAKE_CASE : List[str] = prompt[0]
SCREAMING_SNAKE_CASE : Any = float(prompt[1] )
elif ":" in prompt:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' )
SCREAMING_SNAKE_CASE : Any = float(A )
else:
SCREAMING_SNAKE_CASE : Dict = prompt
SCREAMING_SNAKE_CASE : List[Any] = 1.0
processed_prompts.append(A )
weights.append(A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A, device=self.device ),
}
def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ):
'''simple docstring'''
if image_path:
SCREAMING_SNAKE_CASE : int = self._get_latent(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(A, A, A )
assert pos_prompts, "You must provide at least one positive prompt."
SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A )
if save_final and save_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(A ):
os.makedirs(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp()
os.makedirs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(A ) )
SCREAMING_SNAKE_CASE : int = loop_post_process(A )
for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ):
if show_intermediate:
show_pil(A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'Image': wandb.Image(A )} )
if show_final:
show_pil(A )
if save_final:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
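# Flow implemented above (sketch): encode the input image to a VQGAN latent, add a
# learnable vector, decode, score the result against positive/negative CLIP prompts
# (given as "prompt:weight" strings, '|'-separated), and optimize the vector with Adam.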
| 28 | 0 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
lowerCamelCase__ : str = """."""
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
lowerCamelCase__ : int = [
"""Assert""",
"""AssignVariableOp""",
"""EmptyTensorList""",
"""MergeV2Checkpoints""",
"""ReadVariableOp""",
"""ResourceGather""",
"""RestoreV2""",
"""SaveV2""",
"""ShardedFilename""",
"""StatefulPartitionedCall""",
"""StaticRegexFullMatch""",
"""VarHandleOp""",
]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
snake_case__ = SavedModel()
snake_case__ = []
with open(os.path.join(__UpperCamelCase , '''utils''' , '''tf_ops''' , '''onnx.json''' ) ) as f:
snake_case__ = json.load(__UpperCamelCase )['opsets']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__UpperCamelCase )] )
with open(__UpperCamelCase , '''rb''' ) as f:
saved_model.ParseFromString(f.read() )
snake_case__ = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
snake_case__ = sorted(__UpperCamelCase )
snake_case__ = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__UpperCamelCase )
if strict and len(__UpperCamelCase ) > 0:
raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops )
elif len(__UpperCamelCase ) > 0:
print(F"""Found the following incompatible ops for the opset {opset}:""" )
print(*__UpperCamelCase , sep='''\n''' )
else:
print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
lowerCamelCase__ : str = argparse.ArgumentParser()
parser.add_argument("""--saved_model_path""", help="""Path of the saved model to check (the .pb file).""")
parser.add_argument(
"""--opset""", default=1_2, type=int, help="""The ONNX opset against which the model has to be tested."""
)
parser.add_argument(
"""--framework""", choices=["""onnx"""], default="""onnx""", help="""Frameworks against which to test the saved model."""
)
parser.add_argument(
"""--strict""", action="""store_true""", help="""Whether make the checking strict (raise errors) or not (raise warnings)"""
)
lowerCamelCase__ : Any = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
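# Example invocation (illustrative path; run from the repo root):
#     python utils/check_tf_ops.py --saved_model_path saved_model.pb --opset 12 --strict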
| 33 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Dict = nn.ModuleList(A )
def UpperCamelCase_ ( self, A, A, A, A, A, A = None, A = None, A = None, A = None, A = False, A = True, ):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(A, A, self.nets ) ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = controlnet(
A, A, A, A, A, A, A, A, A, A, A, )
# merge samples
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = down_samples, mid_sample
else:
SCREAMING_SNAKE_CASE : str = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(A, A )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCamelCase_ ( self, A, A = True, A = None, A = False, A = None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Optional[int] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
A, is_main_process=A, save_function=A, safe_serialization=A, variant=A, )
idx += 1
SCREAMING_SNAKE_CASE : List[Any] = model_path_to_save + F"_{idx}"
@classmethod
def UpperCamelCase_ ( cls, A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : List[Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
SCREAMING_SNAKE_CASE : Optional[Any] = pretrained_model_path
while os.path.isdir(A ):
SCREAMING_SNAKE_CASE : Optional[int] = ControlNetModel.from_pretrained(A, **A )
controlnets.append(A )
idx += 1
SCREAMING_SNAKE_CASE : Union[str, Any] = pretrained_model_path + F"_{idx}"
logger.info(F"{len(A )} controlnets loaded from {pretrained_model_path}." )
if len(A ) == 0:
raise ValueError(
F"No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + '_0'}." )
return cls(A )
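# Resulting directory layout (derived from the save/load loops above):
#     <save_directory>/     first controlnet
#     <save_directory>_1/   second controlnet, and so on;
# `from_pretrained` probes `<path>`, `<path>_1`, ... and stops at the first missing directory.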
| 28 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
a_ :Optional[Any] = get_tests_dir("fixtures")
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Union[str, Any] ) ->Dict:
snake_case__ : Optional[Any] = mock.Mock()
snake_case__ : Tuple = 5_0_0
snake_case__ : int = {}
snake_case__ : Union[str, Any] = HTTPError
snake_case__ : Tuple = {}
# Download this model to make sure it's in the cache.
snake_case__ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=_snake_case ) as mock_head:
snake_case__ : int = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def lowercase_ ( self : str ) ->List[str]:
snake_case__ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowercase_ ( cls : List[Any] ) ->Tuple:
snake_case__ : int = TOKEN
HfFolder.save_token(_snake_case )
@classmethod
def lowercase_ ( cls : str ) ->Any:
try:
delete_repo(token=cls._token, repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def lowercase_ ( self : int ) ->Any:
snake_case__ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(_snake_case )
feature_extractor.push_to_hub('test-feature-extractor', use_auth_token=self._token )
snake_case__ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_snake_case, getattr(_snake_case, _snake_case ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_snake_case, repo_id='test-feature-extractor', push_to_hub=_snake_case, use_auth_token=self._token )
snake_case__ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_snake_case, getattr(_snake_case, _snake_case ) )
def lowercase_ ( self : Any ) ->Optional[Any]:
snake_case__ : Dict = WavaVecaFeatureExtractor.from_pretrained(_snake_case )
feature_extractor.push_to_hub('valid_org/test-feature-extractor', use_auth_token=self._token )
snake_case__ : int = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_snake_case, getattr(_snake_case, _snake_case ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_snake_case, repo_id='valid_org/test-feature-extractor-org', push_to_hub=_snake_case, use_auth_token=self._token )
snake_case__ : str = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_snake_case, getattr(_snake_case, _snake_case ) )
def lowercase_ ( self : Any ) ->Optional[int]:
CustomFeatureExtractor.register_for_auto_class()
snake_case__ : str = CustomFeatureExtractor.from_pretrained(_snake_case )
feature_extractor.push_to_hub('test-dynamic-feature-extractor', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map, {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'}, )
snake_case__ : Any = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''', trust_remote_code=_snake_case )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__, 'CustomFeatureExtractor' )
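# The three staging tests above cover: direct `push_to_hub`, `save_pretrained(...,
# push_to_hub=True)`, and pushing a custom extractor registered via
# `register_for_auto_class` (reloaded with `trust_remote_code=True`).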
| 478 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : str = ['''audio_values''', '''audio_mask''']
def __init__( self, A=2_048, A=1, A=[16, 16], A=128, A=44_100, A=86, A=2_048, A=0.0, **A, ):
'''simple docstring'''
super().__init__(
feature_size=A, sampling_rate=A, padding_value=A, **A, )
SCREAMING_SNAKE_CASE : str = spectrogram_length
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = feature_size // self.patch_size[1]
SCREAMING_SNAKE_CASE : Dict = n_fft
SCREAMING_SNAKE_CASE : Tuple = sampling_rate // hop_length_to_sampling_rate
SCREAMING_SNAKE_CASE : str = sampling_rate
SCREAMING_SNAKE_CASE : int = padding_value
SCREAMING_SNAKE_CASE : Any = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=A, min_frequency=0.0, max_frequency=2_20_50.0, sampling_rate=A, norm='slaney', mel_scale='slaney', ).T
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = spectrogram(
A, window_function(self.n_fft, 'hann' ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel='dB', db_range=80.0, )
SCREAMING_SNAKE_CASE : Union[str, Any] = log_spec[:, :-1]
SCREAMING_SNAKE_CASE : List[Any] = log_spec - 20.0
SCREAMING_SNAKE_CASE : Optional[Any] = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
return log_spec
def __call__( self, A, A = None, A = True, A = None, A = False, A = False, **A, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
F" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
SCREAMING_SNAKE_CASE : List[Any] = isinstance(A, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(A, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A, np.ndarray ):
SCREAMING_SNAKE_CASE : Any = np.asarray(A, dtype=np.floataa )
elif isinstance(A, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
SCREAMING_SNAKE_CASE : int = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
SCREAMING_SNAKE_CASE : Tuple = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
SCREAMING_SNAKE_CASE : List[Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
SCREAMING_SNAKE_CASE : Tuple = np.array(A ).astype(np.floataa )
# convert into correct format for padding
SCREAMING_SNAKE_CASE : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
SCREAMING_SNAKE_CASE : Optional[int] = padded_audio_features * self.padding_value
for i in range(len(A ) ):
SCREAMING_SNAKE_CASE : Optional[int] = audio_features[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = feature
# return as BatchFeature
if return_attention_mask:
SCREAMING_SNAKE_CASE : Any = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
SCREAMING_SNAKE_CASE : Dict = {'audio_values': padded_audio_features}
SCREAMING_SNAKE_CASE : str = BatchFeature(data=A, tensor_type=A )
return encoded_inputs
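# Output shape sketch (follows the padding logic above):
#     audio_values: (batch, 1, max_time_len, feature_size), padded with `padding_value`
#     audio_mask:   (batch, max_patch_len), 1 for real audio patches and 0 for padding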
| 28 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class SCREAMING_SNAKE_CASE_ :
def __init__( self : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int]=13 , lowerCamelCase_ : List[str]=7 , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Dict=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : int=99 , lowerCamelCase_ : int=32 , lowerCamelCase_ : Union[str, Any]=5 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : str=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : List[Any]=512 , lowerCamelCase_ : Union[str, Any]=16 , lowerCamelCase_ : str=2 , lowerCamelCase_ : Union[str, Any]=0.0_2 , lowerCamelCase_ : Optional[Any]=3 , lowerCamelCase_ : int=4 , lowerCamelCase_ : Optional[Any]=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = scope
UpperCamelCase = self.vocab_size - 1
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
UpperCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , *lowerCamelCase_ : int ):
"""simple docstring"""
UpperCamelCase = OpenAIGPTModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , head_mask=lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
UpperCamelCase = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] , *lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = OpenAIGPTLMHeadModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] , *lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = OpenAIGPTDoubleHeadsModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , *lowerCamelCase_ : List[str] ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = OpenAIGPTForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
UpperCamelCase = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
__lowerCAmelCase = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
__lowerCAmelCase = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
__lowerCAmelCase = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict , lowerCamelCase_ : int , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any]=False ):
"""simple docstring"""
UpperCamelCase = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
UpperCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase_ , )
UpperCamelCase = inputs_dict['labels']
UpperCamelCase = inputs_dict['labels']
UpperCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCamelCase_ , )
UpperCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
return inputs_dict
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = OpenAIGPTModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , n_embd=37 )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCamelCase_ )
@slow
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = OpenAIGPTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(lowerCamelCase_ )
UpperCamelCase = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowerCamelCase_ ) # the president is
UpperCamelCase = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
UpperCamelCase = model.generate(lowerCamelCase_ , do_sample=lowerCamelCase_ )
self.assertListEqual(output_ids[0].tolist() , lowerCamelCase_ )
| 537 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = 9, 14 # noqa: F841
SCREAMING_SNAKE_CASE : Optional[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
SCREAMING_SNAKE_CASE : Optional[int] = defaultdict(__UpperCamelCase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
SCREAMING_SNAKE_CASE : Dict = mst(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[int] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
SCREAMING_SNAKE_CASE : Any = tuple(answer[:2] )
SCREAMING_SNAKE_CASE : List[Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
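# The graph is undirected, so a returned MST edge (u, v) may appear as (v, u);
# the assertion above therefore accepts either orientation.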
| 28 | 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase :
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=[10, 20, 30, 40] , UpperCAmelCase=[2, 2, 3, 2] , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=10 , UpperCAmelCase=0.0_2 , UpperCAmelCase=["stage2", "stage3", "stage4"] , UpperCAmelCase=3 , UpperCAmelCase=None , ):
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = num_stages
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = depths
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = out_features
lowerCamelCase_ = num_labels
lowerCamelCase_ = scope
lowerCamelCase_ = num_stages
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self ):
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def UpperCAmelCase__ ( self ):
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=UpperCAmelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=UpperCAmelCase , loss_ignore_index=255 , num_labels=self.num_labels , )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = UperNetForSemanticSegmentation(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
a__: str = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
a__: Dict = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
a__: List[Any] = False
a__: Tuple = False
a__: Optional[Any] = False
a__: str = False
a__: List[str] = False
a__: int = False
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = UperNetModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase__ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self ):
return
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(UpperCAmelCase )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def UpperCAmelCase__ ( self ):
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def UpperCAmelCase__ ( self ):
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def UpperCAmelCase__ ( self ):
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def UpperCAmelCase__ ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def UpperCAmelCase__ ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
def check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        # check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = _config_zero_init(UpperCAmelCase )
lowerCamelCase_ = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(config=UpperCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def UpperCAmelCase__ ( self ):
pass
@slow
def UpperCAmelCase__ ( self ):
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = UperNetForSemanticSegmentation.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def lowercase ( ):
lowerCamelCase_ = hf_hub_download(
repo_id='''hf-internal-testing/fixtures_ade20k''' ,repo_type='''dataset''' ,filename='''ADE_val_00000001.jpg''' )
lowerCamelCase_ = Image.open(__UpperCamelCase ).convert('''RGB''' )
return image
@require_torch
@require_vision
@slow
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
lowerCamelCase_ = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(UpperCAmelCase )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''pt''' ).to(UpperCAmelCase )
with torch.no_grad():
lowerCamelCase_ = model(**UpperCAmelCase )
lowerCamelCase_ = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCamelCase_ = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
lowerCamelCase_ = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(UpperCAmelCase )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = processor(images=UpperCAmelCase , return_tensors='''pt''' ).to(UpperCAmelCase )
with torch.no_grad():
lowerCamelCase_ = model(**UpperCAmelCase )
lowerCamelCase_ = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCamelCase_ = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
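# Condensed inference sketch (checkpoint name taken from the integration tests above):
#     processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#     logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits
#     # -> torch.Size([1, model.config.num_labels, 512, 512])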
| 29 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : int = StableDiffusionDiffEditPipeline
A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
A : str = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A : Union[str, Any] = frozenset([] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, )
SCREAMING_SNAKE_CASE : int = DDIMScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, )
SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE : int = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Dict = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Any = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not hasattr(self.pipeline_class, '_optional_components' ):
return
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A, A, A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0]
SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max()
self.assertLess(A, 1E-4 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 'cpu'
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A )
SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape, (1, 16, 16) )
SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 )
SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
self.assertEqual(mask[0, -3, -4], 0 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A )
SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A )
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) )
SCREAMING_SNAKE_CASE : List[str] = raw_image
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears'
SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents
SCREAMING_SNAKE_CASE : List[str] = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : List[Any] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained(
'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.float16 )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears'
SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents
SCREAMING_SNAKE_CASE : str = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : Tuple = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
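# Hedged aside (added for illustration, not part of the original tests): DiffEdit
# inversion integrates the sampler in the reverse direction, which is why each
# variant above pairs a forward scheduler (DDIM / DPMSolverMultistep) with its
# *Inverse* counterpart built from the same beta schedule.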
| 28 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launch helper's own flags plus the training script and its arguments."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv so the training script sees its own arguments plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
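# Hedged usage sketch (the script name and flags below are assumptions for illustration):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased
# After the sys.argv patch above, each spawned process sees:
#   sys.argv == ["run_glue.py", "--model_name_or_path", "bert-base-cased",
#                "--tpu_num_cores", "8"]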
| 475 |
'''simple docstring'''
def solution(limit: int = 1_00_00_00) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, using a sieve."""
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so fix up every multiple of i
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
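# Hedged cross-check (added for illustration): a brute-force totient via gcd
# counting should agree with the sieve on small limits.
def _phi_bruteforce(n: int) -> int:
    from math import gcd

    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)


assert solution(10) == sum(_phi_bruteforce(n) for n in range(2, 11)) == 31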
| 28 | 0 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """Remove dot-separated key segments: positive counts shave from the front, negative from the back."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Update resnet parameter names to the diffusers convention."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Update attention parameter names to the diffusers convention."""
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
UpperCAmelCase__ = checkpoint['time_embed.0.weight']
UpperCAmelCase__ = checkpoint['time_embed.0.bias']
UpperCAmelCase__ = checkpoint['time_embed.2.weight']
UpperCAmelCase__ = checkpoint['time_embed.2.bias']
UpperCAmelCase__ = checkpoint['input_blocks.0.0.weight']
UpperCAmelCase__ = checkpoint['input_blocks.0.0.bias']
UpperCAmelCase__ = checkpoint['out.0.weight']
UpperCAmelCase__ = checkpoint['out.0.bias']
UpperCAmelCase__ = checkpoint['out.2.weight']
UpperCAmelCase__ = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
UpperCAmelCase__ = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
UpperCAmelCase__ = {
layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the middle blocks only
UpperCAmelCase__ = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
UpperCAmelCase__ = {
layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the output blocks only
UpperCAmelCase__ = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
UpperCAmelCase__ = {
layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
for layer_id in range(__UpperCamelCase )
}
for i in range(1 , __UpperCamelCase ):
UpperCAmelCase__ = (i - 1) // (config['num_res_blocks'] + 1)
UpperCAmelCase__ = (i - 1) % (config['num_res_blocks'] + 1)
UpperCAmelCase__ = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
UpperCAmelCase__ = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
UpperCAmelCase__ = checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
UpperCAmelCase__ = checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
UpperCAmelCase__ = renew_resnet_paths(__UpperCamelCase )
UpperCAmelCase__ = {'old': F'''input_blocks.{i}.0''', 'new': F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
UpperCAmelCase__ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase )
if len(__UpperCamelCase ):
UpperCAmelCase__ = renew_attention_paths(__UpperCamelCase )
UpperCAmelCase__ = {
'old': F'''input_blocks.{i}.1''',
'new': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
UpperCAmelCase__ = {
F'''input_blocks.{i}.1.qkv.bias''': {
'key': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'query': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'value': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
'key': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'query': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'value': F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , )
UpperCAmelCase__ = middle_blocks[0]
UpperCAmelCase__ = middle_blocks[1]
UpperCAmelCase__ = middle_blocks[2]
UpperCAmelCase__ = renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
UpperCAmelCase__ = renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
UpperCAmelCase__ = renew_attention_paths(__UpperCamelCase )
UpperCAmelCase__ = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase )
for i in range(__UpperCamelCase ):
UpperCAmelCase__ = i // (config['num_res_blocks'] + 1)
UpperCAmelCase__ = i % (config['num_res_blocks'] + 1)
UpperCAmelCase__ = [shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]]
UpperCAmelCase__ = {}
for layer in output_block_layers:
UpperCAmelCase__ = layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__UpperCamelCase )
else:
UpperCAmelCase__ = [layer_name]
if len(__UpperCamelCase ) > 1:
UpperCAmelCase__ = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
UpperCAmelCase__ = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
UpperCAmelCase__ = renew_resnet_paths(__UpperCamelCase )
UpperCAmelCase__ = renew_resnet_paths(__UpperCamelCase )
UpperCAmelCase__ = {'old': F'''output_blocks.{i}.0''', 'new': F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCAmelCase__ = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
UpperCAmelCase__ = checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
UpperCAmelCase__ = checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(__UpperCamelCase ) == 2:
UpperCAmelCase__ = []
if len(__UpperCamelCase ):
UpperCAmelCase__ = renew_attention_paths(__UpperCamelCase )
UpperCAmelCase__ = {
'old': F'''output_blocks.{i}.1''',
'new': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
UpperCAmelCase__ = {
F'''output_blocks.{i}.1.qkv.bias''': {
'key': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
'query': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
'value': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
'key': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
'query': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
'value': F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , )
else:
UpperCAmelCase__ = renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCAmelCase__ = '.'.join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] )
UpperCAmelCase__ = '.'.join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] )
UpperCAmelCase__ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
snake_case__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
snake_case__ : Dict = parser.parse_args()
snake_case__ : Any = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
snake_case__ : str = json.loads(f.read())
snake_case__ : Dict = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
snake_case__ : Tuple = UNet2DModel(**config)
model.load_state_dict(converted_checkpoint)
try:
snake_case__ : Union[str, Any] = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
snake_case__ : Union[str, Any] = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
snake_case__ : List[str] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
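# Hedged illustration (a standalone restatement, not a call into this script): the
# key renaming above repeatedly shaves leading dot-separated segments off state-dict keys.
assert ".".join("input_blocks.3.0.op.weight".split(".")[2:]) == "0.op.weight"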
| 392 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : str = LongformerTokenizer
A : List[str] = True
A : Optional[int] = LongformerTokenizerFast
A : Tuple = True
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Any = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(A, range(len(A ) ) ) )
SCREAMING_SNAKE_CASE : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE : Tuple = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(A ) )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'lower newer'
SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map )
SCREAMING_SNAKE_CASE : Optional[Any] = 'lower newer'
SCREAMING_SNAKE_CASE : List[str] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A, A )
SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=A ), [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=A ), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('sequence builders', add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode('multi-sequence build', add_special_tokens=A )
SCREAMING_SNAKE_CASE : int = tokenizer.encode(
'sequence builders', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
'sequence builders', 'multi-sequence build', add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A, A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[int] = 'Encode this sequence.'
SCREAMING_SNAKE_CASE : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A, A )
SCREAMING_SNAKE_CASE : str = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A, A )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(A, add_special_tokens=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A, A )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE : Optional[int] = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.convert_tokens_to_ids(A )
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE : List[str] = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Tuple = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A, A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE : Any = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ), sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ), )
SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
A, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['add_prefix_space'], A )
self.assertEqual(post_processor_state['trim_offsets'], A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}"
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Any = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : str = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
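# Hedged aside (illustration only): the "\u0120" prefix in the toy vocab above is
# byte-level BPE's printable stand-in for a leading space; GPT-2's byte encoder
# shifts the space byte 0x20 up by 0x100.
assert "\u0120" == chr(0x20 + 0x100) == "Ġ"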
| 28 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a ={
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =["""LongformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
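# Hedged note (added for illustration): because the import structure above is
# declared as strings, `from transformers.models.longformer import LongformerModel`
# only imports the heavy torch/TF submodules when that attribute is first resolved.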
| 652 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImg2ImgPipeline,
UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Union[str, Any] = StableDiffusionXLImg2ImgPipeline
A : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
A : str = PipelineTesterMixin.required_optional_params - {'''latents'''}
A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
A : int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = UNet2DConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=A, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
SCREAMING_SNAKE_CASE : str = EulerDiscreteScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, )
SCREAMING_SNAKE_CASE : int = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModelWithProjection(A )
SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=A )
SCREAMING_SNAKE_CASE : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : str = image / 2 + 0.5
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionXLImg2ImgPipeline(**A )
SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Any = sd_pipe(**A ).images
SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : List[str] = StableDiffusionXLImg2ImgPipeline(**A )
SCREAMING_SNAKE_CASE : str = sd_pipe.to(A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
# forward without prompt embeds
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt
SCREAMING_SNAKE_CASE : Optional[int] = 3 * [inputs['prompt']]
SCREAMING_SNAKE_CASE : int = sd_pipe(**A )
SCREAMING_SNAKE_CASE : List[Any] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : str = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE : int = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = sd_pipe.encode_prompt(A, negative_prompt=A )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(
**A, prompt_embeds=A, negative_prompt_embeds=A, pooled_prompt_embeds=A, negative_pooled_prompt_embeds=A, )
SCREAMING_SNAKE_CASE : Optional[int] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self, A, A="cpu", A=torch.floataa, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.RandomState(A ).standard_normal((1, 4, 64, 64) )
SCREAMING_SNAKE_CASE : str = torch.from_numpy(A ).to(device=A, dtype=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_inputs(A )
SCREAMING_SNAKE_CASE : str = pipe(**A ).images
SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Dict = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
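# Hedged illustration of the slicing convention used in the checks above: for a
# (batch, H, W, C) array, `image[0, -3:, -3:, -1]` is the last channel of the
# bottom-right 3x3 corner of the first image.
import numpy as np

assert np.zeros((1, 512, 512, 3))[0, -3:, -3:, -1].shape == (3, 3)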
| 28 | 0 |
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(F"{solution() = }")
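# Hedged aside (illustration only): XOR with the key is self-inverse, which is why
# enumerating three-letter keys and keeping only fully printable decodings works.
assert (ord("a") ^ 107) ^ 107 == ord("a")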
| 443 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]

    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(F"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
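# Hedged usage sketch (the checkpoint name and output key are assumptions, not taken
# from this file):
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   out = processor.batch_decode((char_logits, bpe_logits, wp_logits))
#   out["generated_text"]  # per sample, the best-scoring of the char/bpe/wp decodings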
| 28 | 0 |
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # a negative max_weight (e.g. -15) is rejected by calc_profit
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        # max_weight = 0 is rejected by calc_profit
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
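# Hedged check of the 210 expectation above (illustration only): every item has a
# profit/weight ratio of 5 and the weights sum to 42 <= 100, so a greedy knapsack
# can take every item.
assert sum([10, 20, 30, 40, 50, 60]) == 210 and sum([2, 4, 6, 8, 10, 12]) <= 100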
| 639 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint: dict, hf_model: SpeechT5HifiGan, config: SpeechT5HifiGanConfig):
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path: str,
    stats_path: str,
    pytorch_dump_folder_path: str,
    config_path: str = None,
    repo_id: str = None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCamelCase_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
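# Hedged note (added for illustration): load_weights brackets the key copies with
# apply_weight_norm()/remove_weight_norm() because the original checkpoint stores
# the weight_g/weight_v parametrization, which is fused back before saving.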
| 28 | 0 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        """Check if log should be performed on this process."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
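# Hedged usage sketch:
#   logger = get_logger(__name__)
#   logger.info("printed once, on the main process")  # main_process_only defaults to True
#   logger.info("printed by every rank, in order", main_process_only=False, in_order=True)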
| 48 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return F"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list():
    """Exercise the LinkedList operations with integer data."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = [
-9,
1_00,
Node(77_34_51_12 ),
'dlrow olleH',
7,
55_55,
0,
-1_9_2.5_5_5_5_5,
'Hello, world!',
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
SCREAMING_SNAKE_CASE : Optional[int] = LinkedList()
for i in test_input:
linked_list.insert_tail(__UpperCamelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
SCREAMING_SNAKE_CASE : str = linked_list.delete_head()
assert result == -9
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
SCREAMING_SNAKE_CASE : Dict = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
    # Delete a node at a specific location in the linked list
SCREAMING_SNAKE_CASE : str = linked_list.delete_nth(10 )
assert result is None
assert (
str(__UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__UpperCamelCase )
assert (
str(__UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__UpperCamelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def lowercase__( ):
"""simple docstring"""
from doctest import testmod
testmod()
SCREAMING_SNAKE_CASE : Dict = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(__UpperCamelCase )
print('\nReading/changing Node data using indexing:' )
print(f"Element at Position 1: {linked_list[1]}" )
SCREAMING_SNAKE_CASE : str = input('Enter New Value: ' ).strip()
print('New list:' )
print(__UpperCamelCase )
print(f"length of linked_list is : {len(__UpperCamelCase )}" )
if __name__ == "__main__":
main()
| 28 | 0 |
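# A minimal, readable sketch of the iterative reversal and positional delete that
# the list class above implements. `Node`, `reverse`, and `delete_nth` are
# illustrative stand-ins, not the exact names used by the original class.
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None

def reverse(head):
    prev = None
    current = head
    while current:
        next_node = current.next  # remember the rest of the list
        current.next = prev  # flip the pointer backwards
        prev = current  # advance prev
        current = next_node  # advance current
    return prev  # prev ends up as the new head

def delete_nth(head, index):
    # Returns (new_head, deleted_data); assumes 0 <= index < length.
    if index == 0:
        return head.next, head.data
    temp = head
    for _ in range(index - 1):
        temp = temp.next
    delete_node = temp.next
    temp.next = delete_node.next
    return head, delete_node.data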
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )-> str:
"""simple docstring"""
if height >= 1:
move_tower(height - 1, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
move_disk(__UpperCamelCase, __UpperCamelCase )
move_tower(height - 1, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase )-> Tuple:
"""simple docstring"""
print('''moving disk from''', __UpperCamelCase, '''to''', __UpperCamelCase )
def __UpperCAmelCase ( )-> Optional[Any]:
"""simple docstring"""
lowercase = int(input('''Height of hanoi: ''' ).strip() )
move_tower(__UpperCamelCase, '''A''', '''B''', '''C''' )
if __name__ == "__main__":
main()
| 604 |
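# A readable sketch of the recursion above: moving `n` disks from `source` to
# `target` via `spare` takes 2**n - 1 disk moves. The function and peg names
# are illustrative, not the original's.
def hanoi(n, source, target, spare, moves):
    if n >= 1:
        hanoi(n - 1, source, spare, target, moves)  # park n-1 disks on the spare peg
        moves.append((source, target))  # move the largest disk
        hanoi(n - 1, spare, target, source, moves)  # bring the n-1 disks back on top

moves = []
hanoi(3, "A", "C", "B", moves)
assert len(moves) == 2**3 - 1  # 7 moves for 3 disks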
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, A, A=7, A=3, A=30, A=400, A=True, A=None, A=True, A=[0.5, 0.5, 0.5], A=[0.5, 0.5, 0.5], A=True, A=1 / 255, A=True, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : Dict = batch_size
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Tuple = min_resolution
SCREAMING_SNAKE_CASE : int = max_resolution
SCREAMING_SNAKE_CASE : Tuple = do_resize
SCREAMING_SNAKE_CASE : Tuple = size
SCREAMING_SNAKE_CASE : Any = do_normalize
SCREAMING_SNAKE_CASE : Optional[int] = image_mean
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std
SCREAMING_SNAKE_CASE : Optional[int] = do_rescale
SCREAMING_SNAKE_CASE : int = rescale_factor
SCREAMING_SNAKE_CASE : List[str] = do_pad
def UpperCamelCase_ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self, A, A=False ):
'''simple docstring'''
if not batched:
SCREAMING_SNAKE_CASE : List[Any] = image_inputs[0]
if isinstance(A, Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = image.size
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE : int = int(self.size['shortest_edge'] * h / w )
SCREAMING_SNAKE_CASE : int = self.size['shortest_edge']
elif w > h:
SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge']
SCREAMING_SNAKE_CASE : Dict = int(self.size['shortest_edge'] * w / h )
else:
SCREAMING_SNAKE_CASE : Any = self.size['shortest_edge']
SCREAMING_SNAKE_CASE : int = self.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for image in image_inputs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE : Union[str, Any] = max(A, key=lambda A : item[0] )[0]
SCREAMING_SNAKE_CASE : str = max(A, key=lambda A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : List[Any] = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = YolosImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A, 'image_mean' ) )
self.assertTrue(hasattr(A, 'image_std' ) )
self.assertTrue(hasattr(A, 'do_normalize' ) )
self.assertTrue(hasattr(A, 'do_resize' ) )
self.assertTrue(hasattr(A, 'size' ) )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333} )
self.assertEqual(image_processor.do_pad, A )
SCREAMING_SNAKE_CASE : str = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A, Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A, batched=A )
SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A )
for image in image_inputs:
self.assertIsInstance(A, np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(do_resize=A, do_normalize=A, do_rescale=A )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
SCREAMING_SNAKE_CASE : List[str] = image_processing_a.pad(A, return_tensors='pt' )
SCREAMING_SNAKE_CASE : Dict = image_processing_a(A, return_tensors='pt' )
self.assertTrue(
torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() )
SCREAMING_SNAKE_CASE : Any = {'image_id': 39_769, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE : Any = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
SCREAMING_SNAKE_CASE : int = image_processing(images=A, annotations=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify orig_size
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : int = json.loads(f.read() )
SCREAMING_SNAKE_CASE : List[Any] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='coco_panoptic' )
SCREAMING_SNAKE_CASE : str = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify masks
SCREAMING_SNAKE_CASE : Optional[int] = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A )
# verify orig_size
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
| 28 | 0 |
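# A sketch of the aspect-preserving resize arithmetic that `get_expected_values`
# above encodes: scale the image so its shorter side equals `shortest_edge`,
# keeping the aspect ratio. (The real processor also caps the longer side at
# `longest_edge`; that clamp is omitted here for brevity.)
def expected_size(height, width, shortest_edge=18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge  # square input

print(expected_size(400, 300))  # (24, 18): the shorter (width) side becomes 18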
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
lowerCamelCase__ : Dict = random.Random()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase=1.0 , __lowerCAmelCase=None , __lowerCAmelCase=None ) -> Any:
if rng is None:
snake_case__ = global_rng
snake_case__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class __magic_name__ (unittest.TestCase ):
'''simple docstring'''
def __init__( self:str , _a:str , _a:Optional[Any]=7 , _a:List[Any]=4_00 , _a:Any=20_00 , _a:Any=1 , _a:Tuple=0.0 , _a:str=1_60_00 , _a:Union[str, Any]=True , _a:Any=80 , _a:List[str]=16 , _a:Tuple=64 , _a:List[str]="hann_window" , _a:Union[str, Any]=80 , _a:str=76_00 , _a:int=1e-10 , _a:Union[str, Any]=True , ):
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = min_seq_length
snake_case__ = max_seq_length
snake_case__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
snake_case__ = feature_size
snake_case__ = padding_value
snake_case__ = sampling_rate
snake_case__ = do_normalize
snake_case__ = num_mel_bins
snake_case__ = hop_length
snake_case__ = win_length
snake_case__ = win_function
snake_case__ = fmin
snake_case__ = fmax
snake_case__ = mel_floor
snake_case__ = return_attention_mask
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def SCREAMING_SNAKE_CASE__ ( self:int , _a:List[Any]=False , _a:str=False ):
def _flatten(_a:Union[str, Any] ):
return list(itertools.chain(*_a ) )
if equal_length:
snake_case__ = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
snake_case__ = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case__ = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:Any=False , _a:Optional[Any]=False ):
if equal_length:
snake_case__ = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
snake_case__ = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
snake_case__ = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : List[Any] = SpeechTaFeatureExtractor
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = SpeechTaFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE__ ( self:int , _a:Optional[int] ):
self.assertTrue(np.all(np.mean(_a , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_a , axis=0 ) - 1 ) < 1e-3 ) )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case__ = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
snake_case__ = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
snake_case__ = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
snake_case__ = feat_extract(_a , return_tensors='''np''' ).input_values
snake_case__ = feat_extract(_a , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case__ = ['longest', 'max_length', 'do_not_pad']
snake_case__ = [None, 16_00, None]
for max_length, padding in zip(_a , _a ):
snake_case__ = feat_extract(_a , padding=_a , max_length=_a , return_tensors='''np''' )
snake_case__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[0][10_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
snake_case__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ = range(8_00 , 14_00 , 2_00 )
snake_case__ = [floats_list((1, x) )[0] for x in lengths]
snake_case__ = ['longest', 'max_length', 'do_not_pad']
snake_case__ = [None, 16_00, None]
for max_length, padding in zip(_a , _a ):
snake_case__ = feat_extract(_a , max_length=_a , padding=_a )
snake_case__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def SCREAMING_SNAKE_CASE__ ( self:int ):
snake_case__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case__ = feat_extract(
_a , truncation=_a , max_length=10_00 , padding='''max_length''' , return_tensors='''np''' )
snake_case__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case__ = feat_extract(
_a , truncation=_a , max_length=10_00 , padding='''longest''' , return_tensors='''np''' )
snake_case__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
snake_case__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case__ = feat_extract(
_a , truncation=_a , max_length=20_00 , padding='''longest''' , return_tensors='''np''' )
snake_case__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ):
snake_case__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case__ = np.random.rand(1_00 ).astype(np.floataa )
snake_case__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case__ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
snake_case__ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
snake_case__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case__ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
snake_case__ = [np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
snake_case__ = feature_extractor(audio_target=_a , padding=_a , return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
snake_case__ = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
snake_case__ = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
snake_case__ = feature_extractor(_a , return_tensors='''np''' ).input_values
snake_case__ = feature_extractor(_a , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
snake_case__ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
snake_case__ = np.asarray(_a )
snake_case__ = feature_extractor(_a , return_tensors='''np''' ).input_values
snake_case__ = feature_extractor(_a , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE__ ( self:Any ):
snake_case__ = self.feat_extract_tester.prepare_inputs_for_target()
snake_case__ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case__ = feat_extract.model_input_names[0]
snake_case__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_a ) == len(_a ) for x, y in zip(_a , processed_features[input_name] ) ) )
snake_case__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_a )
snake_case__ = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
snake_case__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=_a )
snake_case__ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case__ = feat_extract.model_input_names[0]
snake_case__ = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
snake_case__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
snake_case__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
snake_case__ = self.feature_extraction_class(**self.feat_extract_dict )
snake_case__ = self.feat_extract_tester.prepare_inputs_for_target()
snake_case__ = feat_extract.model_input_names[0]
snake_case__ = BatchFeature({input_name: speech_inputs} )
snake_case__ = feat_extract.num_mel_bins # hack!
snake_case__ = feat_extract.pad(_a , padding='''longest''' , return_tensors='''np''' )[input_name]
snake_case__ = feat_extract.pad(_a , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def SCREAMING_SNAKE_CASE__ ( self:Tuple ):
snake_case__ = self.feat_extract_dict
snake_case__ = True
snake_case__ = self.feature_extraction_class(**_a )
snake_case__ = self.feat_extract_tester.prepare_inputs_for_target()
snake_case__ = [len(_a ) for x in speech_inputs]
snake_case__ = feat_extract.model_input_names[0]
snake_case__ = BatchFeature({input_name: speech_inputs} )
snake_case__ = feat_extract.num_mel_bins # hack!
snake_case__ = feat_extract.pad(_a , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _a )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _a )
def SCREAMING_SNAKE_CASE__ ( self:List[str] ):
snake_case__ = self.feat_extract_dict
snake_case__ = True
snake_case__ = self.feature_extraction_class(**_a )
snake_case__ = self.feat_extract_tester.prepare_inputs_for_target()
snake_case__ = [len(_a ) for x in speech_inputs]
snake_case__ = feat_extract.model_input_names[0]
snake_case__ = BatchFeature({input_name: speech_inputs} )
snake_case__ = min(_a )
snake_case__ = feat_extract.num_mel_bins # hack!
snake_case__ = feat_extract.pad(
_a , padding='''max_length''' , max_length=_a , truncation=_a , return_tensors='''np''' )
self.assertIn('''attention_mask''' , _a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:List[str] ):
from datasets import load_dataset
snake_case__ = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
snake_case__ = ds.sort('''id''' ).select(range(_a ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
        # fmt: off
        snake_case__ = torch.tensor(
[2.3_804e-03, 2.0_752e-03, 1.9_836e-03, 2.1_057e-03, 1.6_174e-03,
3.0_518e-04, 9.1_553e-05, 3.3_569e-04, 9.7_656e-04, 1.8_311e-03,
2.0_142e-03, 2.1_057e-03, 1.7_395e-03, 4.5_776e-04, -3.9_673e-04,
4.5_776e-04, 1.0_071e-03, 9.1_553e-05, 4.8_828e-04, 1.1_597e-03,
7.3_242e-04, 9.4_604e-04, 1.8_005e-03, 1.8_311e-03, 8.8_501e-04,
4.2_725e-04, 4.8_828e-04, 7.3_242e-04, 1.0_986e-03, 2.1_057e-03] )
# fmt: on
snake_case__ = self._load_datasamples(1 )
snake_case__ = SpeechTaFeatureExtractor()
snake_case__ = feature_extractor(_a , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 9_36_80) )
self.assertTrue(torch.allclose(input_values[0, :30] , _a , atol=1e-6 ) )
def SCREAMING_SNAKE_CASE__ ( self:int ):
        # fmt: off
        snake_case__ = torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
snake_case__ = self._load_datasamples(1 )
snake_case__ = SpeechTaFeatureExtractor()
snake_case__ = feature_extractor(audio_target=_a , return_tensors='''pt''' ).input_values
        self.assertEqual(input_values.shape , (1, 3_66, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , _a , atol=1e-4 ) )
| 33 |
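# A numpy sketch of the zero-mean / unit-variance check used above: after
# utterance-level normalization, the values should have mean ~0 and variance ~1
# within a small tolerance (a single 1-D feature here for brevity).
import numpy as np

rng = np.random.default_rng(0)
raw = rng.normal(loc=3.0, scale=2.0, size=(1_000,))
normalized = (raw - raw.mean()) / np.sqrt(raw.var() + 1e-7)

assert abs(normalized.mean()) < 1e-3
assert abs(normalized.var() - 1) < 1e-3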
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = TypeVar("DatasetType", Dataset, IterableDataset)
def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[List[float]] = None ,__UpperCamelCase: Optional[int] = None ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" ,):
"""simple docstring"""
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(__UpperCamelCase ):
if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'is an empty dataset dictionary.' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." )
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = (
(Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )
else:
return _interleave_iterable_datasets(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,stopping_strategy=__UpperCamelCase )
def lowercase__( __UpperCamelCase: List[DatasetType] ,__UpperCamelCase: Optional[DatasetInfo] = None ,__UpperCamelCase: Optional[NamedSplit] = None ,__UpperCamelCase: int = 0 ,):
"""simple docstring"""
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(__UpperCamelCase ):
if not isinstance(__UpperCamelCase ,(Dataset, IterableDataset) ):
if isinstance(__UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
'is an empty dataset dictionary.' )
raise ValueError(
f"Dataset at position {i} has at least one split: {list(__UpperCamelCase )}\n"
f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__UpperCamelCase ) )}']" )
raise ValueError(
f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__UpperCamelCase ).__name__}." )
if i == 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = (
(Dataset, IterableDataset) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(__UpperCamelCase ,__UpperCamelCase ):
raise ValueError(
f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
else:
return _concatenate_iterable_datasets(__UpperCamelCase ,info=__UpperCamelCase ,split=__UpperCamelCase ,axis=__UpperCamelCase )
| 28 | 0 |
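# A small usage sketch of the two entry points above, assuming the public
# `datasets` API (`Dataset.from_dict`, `interleave_datasets`,
# `concatenate_datasets`); the column name "a" is arbitrary.
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"a": [0, 1, 2]})
d2 = Dataset.from_dict({"a": [10, 11, 12, 13]})

# Alternate examples, stopping as soon as one source runs out.
mixed = interleave_datasets([d1, d2], stopping_strategy="first_exhausted")
# Stack rows end to end (axis=1 would instead join columns side by side).
combined = concatenate_datasets([d1, d2])
print(len(mixed), len(combined))  # 6 and 7 with these toy inputs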
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
a_ :Optional[int] = TypeVar("T")
class snake_case__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : Tuple, _snake_case : Any, _snake_case : int ) ->Optional[int]:
snake_case__ : Any | T = None
snake_case__ : int = len(_snake_case )
snake_case__ : list[T] = [any_type for _ in range(self.N )] + arr
snake_case__ : int = fnc
self.build()
def lowercase_ ( self : Tuple ) ->List[Any]:
for p in range(self.N - 1, 0, -1 ):
snake_case__ : Union[str, Any] = self.fn(self.st[p * 2], self.st[p * 2 + 1] )
def lowercase_ ( self : Dict, _snake_case : int, _snake_case : Dict ) ->Any:
p += self.N
snake_case__ : str = v
while p > 1:
snake_case__ : Union[str, Any] = p // 2
snake_case__ : List[str] = self.fn(self.st[p * 2], self.st[p * 2 + 1] )
def lowercase_ ( self : int, _snake_case : int, _snake_case : Union[str, Any] ) ->List[str]: # noqa: E741
snake_case__ : str = l + self.N, r + self.N
snake_case__ : T | None = None
while l <= r:
if l % 2 == 1:
snake_case__ : Any = self.st[l] if res is None else self.fn(_snake_case, self.st[l] )
if r % 2 == 0:
snake_case__ : Optional[Any] = self.st[r] if res is None else self.fn(_snake_case, self.st[r] )
snake_case__ : List[str] = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
a_ :Dict = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
a_ :Optional[Any] = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
a_ :Dict = SegmentTree(test_array, min)
a_ :int = SegmentTree(test_array, max)
a_ :Optional[Any] = SegmentTree(test_array, lambda a, b: a + b)
def lowercase_ ():
for i in range(len(__UpperCamelCase ) ):
for j in range(__UpperCamelCase , len(__UpperCamelCase ) ):
snake_case__ : Dict = reduce(__UpperCamelCase , test_array[i : j + 1] )
snake_case__ : Dict = reduce(__UpperCamelCase , test_array[i : j + 1] )
            snake_case__ : Dict = reduce(lambda a , b : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(__UpperCamelCase , __UpperCamelCase )
assert max_range == max_segment_tree.query(__UpperCamelCase , __UpperCamelCase )
assert sum_range == sum_segment_tree.query(__UpperCamelCase , __UpperCamelCase )
test_all_segments()
for index, value in test_updates.items():
a_ :Optional[int] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 478 |
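# A readable sketch of the iterative segment tree above: leaves live at
# st[n..2n-1], parent p combines children 2p and 2p+1, and `query` works on
# inclusive bounds. Names here are illustrative stand-ins.
class SegTree:
    def __init__(self, arr, fn):
        self.n = len(arr)
        self.fn = fn
        self.st = [None] * self.n + list(arr)
        for p in range(self.n - 1, 0, -1):  # build parents bottom-up
            self.st[p] = fn(self.st[2 * p], self.st[2 * p + 1])

    def update(self, i, v):
        i += self.n
        self.st[i] = v
        while i > 1:  # re-combine every ancestor
            i //= 2
            self.st[i] = self.fn(self.st[2 * i], self.st[2 * i + 1])

    def query(self, l, r):  # inclusive [l, r]
        l, r = l + self.n, r + self.n
        res = None
        while l <= r:
            if l % 2 == 1:  # l is a right child: take it and step past
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:  # r is a left child: take it and step past
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res

assert SegTree([1, 10, -2, 9], min).query(1, 3) == -2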
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(A, 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) )
class _a :
'''simple docstring'''
def __init__( self, A, A=13, A=32, A=2, A=3, A=640, A=4, A="silu", A=3, A=32, A=0.1, A=0.1, A=0.1, A=0.02, A=True, A=True, A=10, A=None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : str = patch_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : int = last_hidden_size
SCREAMING_SNAKE_CASE : Any = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Optional[int] = conv_kernel_size
SCREAMING_SNAKE_CASE : Optional[Any] = output_stride
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = classifier_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Dict = num_labels
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = scope
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size], self.num_labels )
SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
SCREAMING_SNAKE_CASE : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = MobileViTModel(config=A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = MobileViTForImageClassification(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : List[str] = model(A, labels=A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : str = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
SCREAMING_SNAKE_CASE : str = model(A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
SCREAMING_SNAKE_CASE : int = model(A, labels=A )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Tuple = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
A : List[Any] = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A : Optional[int] = False
A : Dict = False
A : List[Any] = False
A : Optional[int] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = MobileViTModelTester(self )
SCREAMING_SNAKE_CASE : str = MobileViTConfigTester(self, config_class=A, has_text_modality=A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A )
SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(A, A, A ):
SCREAMING_SNAKE_CASE : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : List[str] = 5
self.assertEqual(len(A ), A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE : int = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(A, A, A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(A, A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : int = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**A )
# verify the logits
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, A )
SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A )
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**A )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
], device=A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : List[str] = model.to(A )
SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**A )
SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, A )
SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A )
SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, A )
| 28 | 0 |
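# A sketch of the shape bookkeeping the hidden-state test above relies on: each
# of the five reported feature maps halves the spatial size again, so the i-th
# map is image_size // 2**(i + 1) per side and the final divisor equals
# 2 * output_stride. The numbers mirror the tester defaults (image_size=32).
image_size, num_maps = 32, 5
divisor = 2
shapes = []
for _ in range(num_maps):
    shapes.append((image_size // divisor, image_size // divisor))
    divisor *= 2
output_stride = divisor // 2
print(shapes)  # [(16, 16), (8, 8), (4, 4), (2, 2), (1, 1)]
print(output_stride)  # 32, matching the config's output_stride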
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
def __init__( self : int , lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
super().__init__()
UpperCamelCase = nn.ModuleList(lowerCamelCase_ )
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[str] = None , lowerCamelCase_ : Any = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[Any] = None , lowerCamelCase_ : List[Any] = False , lowerCamelCase_ : int = True , ):
"""simple docstring"""
for i, (image, scale, controlnet) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ , self.nets ) ):
UpperCamelCase = controlnet(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
# merge samples
if i == 0:
UpperCamelCase = down_samples, mid_sample
else:
UpperCamelCase = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCamelCase_ , lowerCamelCase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] = True , lowerCamelCase_ : List[Any] = None , lowerCamelCase_ : Dict = False , lowerCamelCase_ : Any = None , ):
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCamelCase_ , is_main_process=lowerCamelCase_ , save_function=lowerCamelCase_ , safe_serialization=lowerCamelCase_ , variant=lowerCamelCase_ , )
idx += 1
UpperCamelCase = model_path_to_save + f"""_{idx}"""
@classmethod
def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
UpperCamelCase = pretrained_model_path
while os.path.isdir(lowerCamelCase_ ):
UpperCamelCase = ControlNetModel.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
controlnets.append(lowerCamelCase_ )
idx += 1
UpperCamelCase = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(lowerCamelCase_ )} controlnets loaded from {pretrained_model_path}.""" )
if len(lowerCamelCase_ ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(lowerCamelCase_ )}. Expected at least {pretrained_model_path + "_0"}.""" )
return cls(lowerCamelCase_ )
| 537 |
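# A plain-PyTorch sketch of the residual merging in the forward pass above:
# each controlnet contributes scaled down-block and mid-block residuals, and
# the per-net contributions are summed element-wise. The "nets" and tensor
# shapes here are toy stand-ins, not the diffusers modules.
import torch

def toy_controlnet(scale):
    downs = [torch.full((1, 4, 8, 8), scale) for _ in range(3)]
    mid = torch.full((1, 4, 4, 4), scale)
    return downs, mid

down_res, mid_res = None, None
for scale in (1.0, 0.5):  # one conditioning scale per controlnet
    downs, mid = toy_controlnet(scale)
    if down_res is None:  # the first net initializes the accumulators
        down_res, mid_res = downs, mid
    else:  # later nets are added sample-by-sample
        down_res = [prev + cur for prev, cur in zip(down_res, downs)]
        mid_res = mid_res + mid

print(down_res[0][0, 0, 0, 0].item(), mid_res[0, 0, 0, 0].item())  # 1.5 1.5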
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase_ = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase_ = {
"distilbert-base-uncased": 5_1_2,
"distilbert-base-uncased-distilled-squad": 5_1_2,
"distilbert-base-cased": 5_1_2,
"distilbert-base-cased-distilled-squad": 5_1_2,
"distilbert-base-german-cased": 5_1_2,
"distilbert-base-multilingual-cased": 5_1_2,
}
UpperCamelCase_ = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
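
# --- Illustrative usage (editor addition; assumes `transformers` is installed) ---
# Encoding a sentence pair shows the [CLS] ... [SEP] ... [SEP] layout produced by
# build_inputs_with_special_tokens and the 0/1 segment ids from
# create_token_type_ids_from_sequences:
# tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
# ids_a = tok.convert_tokens_to_ids(tok.tokenize("how are you?"))
# ids_b = tok.convert_tokens_to_ids(tok.tokenize("i am fine."))
# print(tok.build_inputs_with_special_tokens(ids_a, ids_b))
# print(tok.create_token_type_ids_from_sequences(ids_a, ids_b))  # zeros, then ones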
| 28 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
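
# --- Illustrative note (editor addition) ---
# The `text_target=` pattern exercised above is how seq2seq labels are produced in a
# single tokenizer call. A hedged sketch of the shapes involved (requires downloading
# the RUCAIBox/mvp checkpoint):
# tok = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
# batch = tok(["A long paragraph."], text_target=["Summary."], return_tensors="pt")
# sorted(batch.keys())  # ['attention_mask', 'input_ids', 'labels']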
| 29 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
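
# --- Illustrative sketch (editor addition; standalone stdlib example) ---
# The mock pattern from test_cached_files_are_used_when_internet_is_down generalizes
# to any client that should fall back to a local cache when the transport fails.
# `session` and the endpoint below are hypothetical:
def _fetch_with_fallback(session, cache):
    try:
        response = session.get("https://example.com/config")
        response.raise_for_status()
        return response.json()
    except Exception:
        return cache  # serve the last good copy, as from_pretrained does offline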
| 28 | 0 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(
            list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size]
        )

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
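
# --- Illustrative note (editor addition) ---
# The integration test pins only a 3x3 slice of the hidden states: comparing the
# whole tensor would be brittle and huge, while a fixed slice plus atol=1e-4 still
# catches real regressions. The shape of the check, standalone:
# expected = torch.tensor([[[...], [...], [...]]])  # recorded once from a trusted run
# assert torch.allclose(output[:, 1:4, 1:4], expected, atol=1e-4)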
| 475 |
'''simple docstring'''
class Node:
    # BST node
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal, appending values to res
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
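
# --- Illustrative check (editor addition) ---
# Average-case O(n log n); degrades to O(n^2) on already-sorted input, where the
# BST becomes a linked list. Note that Node.insert silently drops duplicates.
assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == [1, 2, 3, 9, 10, 13, 14]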
| 28 | 0 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
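
# --- Illustrative usage (editor addition) ---
# The shim keeps old imports working while steering users to DeiTImageProcessor;
# the warning can be observed explicitly:
# import warnings
# with warnings.catch_warnings(record=True) as caught:
#     warnings.simplefilter("always")
#     DeiTFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)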
| 392 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
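
# --- Illustrative usage (editor addition; the version string is hypothetical) ---
# Typical call site: a function that renamed the `steps` kwarg keeps accepting it
# for a deprecation window by routing **kwargs through `deprecate`:
# def run(num_steps=None, **kwargs):
#     steps = deprecate("steps", "0.30.0", "Use `num_steps` instead.", take_from=kwargs)
#     num_steps = steps if steps is not None else num_steps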
| 28 | 0 |
import datasets
from .evaluate import evaluate
a ="""\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"""
a ="""\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"""
a ="""\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 652 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
    "tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
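
# --- Illustrative sketch (editor addition): the lazy-import idea in miniature ---
# `_LazyModule` defers importing the heavy torch/tf/flax submodules until an
# attribute is first accessed. A stripped-down standalone version of the pattern
# (error handling omitted; assumes attribute names are unique across submodules):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)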
| 28 | 0 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)
    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_text_1 = tokenizer.decode(tokens_1)
        output_text_2 = tokenizer.decode(tokens_2)
        output_text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(output_text_1, expected_text)
        self.assertEqual(output_text_2, expected_text)
        self.assertEqual(output_text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"

        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2

        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)

        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)
    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")

        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_1[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_1[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 443 |
'''simple docstring'''
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
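
# --- Illustrative check (editor addition) ---
# For non-negative ints the shift loop computes exactly `int.bit_length()`:
assert get_highest_set_bit_position(0) == (0).bit_length() == 0
assert get_highest_set_bit_position(8) == (8).bit_length() == 4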
| 28 | 0 |
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 639 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor (logits or last_hidden_state).
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
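
# --- Illustrative usage (editor addition; assumes `transformers` is installed) ---
# from transformers import pipeline
# extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
# feats = extractor("Hello world")                       # nested lists: [1, seq_len, hidden]
# tens = extractor("Hello world", return_tensors=True)   # framework tensor instead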
| 28 | 0 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 48 |
'''simple docstring'''
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
def build_tree():
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")
def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 28 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
def __a ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
        lowercase = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__lowerCamelCase , )
lowercase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
lowercase = DDIMInverseScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__lowerCamelCase , set_alpha_to_zero=__lowerCamelCase , )
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
lowercase = CLIPTextModel(__lowerCamelCase )
lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __a ( self : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any]=0 ) -> Union[str, Any]:
'''simple docstring'''
lowercase = floats_tensor((1, 16, 16) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowercase = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith('''mps''' ):
lowercase = torch.manual_seed(__lowerCamelCase )
else:
lowercase = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
lowercase = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Any=0 ) -> Any:
'''simple docstring'''
lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase = Image.fromarray(np.uint8(__lowerCamelCase ) ).convert('''RGB''' )
if str(__lowerCamelCase ).startswith('''mps''' ):
lowercase = torch.manual_seed(__lowerCamelCase )
else:
lowercase = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
lowercase = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=0 ) -> Any:
'''simple docstring'''
lowercase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase = Image.fromarray(np.uint8(__lowerCamelCase ) ).convert('''RGB''' )
if str(__lowerCamelCase ).startswith('''mps''' ):
lowercase = torch.manual_seed(__lowerCamelCase )
else:
lowercase = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
lowercase = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def __a ( self : str ) -> List[Any]:
'''simple docstring'''
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase = self.get_dummy_inputs(__lowerCamelCase )
lowercase = pipe(**__lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCamelCase )
lowercase = self.pipeline_class.from_pretrained(__lowerCamelCase )
pipe_loaded.to(__lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCamelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__lowerCamelCase , __lowerCamelCase ) is None , f'`{optional_component}` did not stay set to None after loading.' , )
lowercase = self.get_dummy_inputs(__lowerCamelCase )
lowercase = pipe_loaded(**__lowerCamelCase )[0]
lowercase = np.abs(output - output_loaded ).max()
self.assertLess(__lowerCamelCase , 1E-4 )
def __a ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase = 'cpu'
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowercase = self.get_dummy_mask_inputs(__lowerCamelCase )
lowercase = pipe.generate_mask(**__lowerCamelCase )
lowercase = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase = np.array([0] * 9 )
lowercase = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __a ( self : Any ) -> Optional[int]:
'''simple docstring'''
lowercase = 'cpu'
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowercase = self.get_dummy_inversion_inputs(__lowerCamelCase )
lowercase = pipe.invert(**__lowerCamelCase ).images
lowercase = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
lowercase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase , 1E-3 )
def __a ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def __a ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
lowercase = 'cpu'
lowercase = self.get_dummy_components()
lowercase = {'beta_start': 0.0_0085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
lowercase = DPMSolverMultistepScheduler(**__lowerCamelCase )
lowercase = DPMSolverMultistepInverseScheduler(**__lowerCamelCase )
lowercase = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowercase = self.get_dummy_inversion_inputs(__lowerCamelCase )
lowercase = pipe.invert(**__lowerCamelCase ).images
lowercase = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5_1050, 0.5015, 0.4407, 0.4799] , )
lowercase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase , 1E-3 )
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
def __a ( self : Union[str, Any] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __a ( cls : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
lowercase = raw_image.convert('''RGB''' ).resize((7_68, 7_68) )
lowercase = raw_image
def __a ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
lowercase = torch.manual_seed(0 )
lowercase = StableDiffusionDiffEditPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-1''' , safety_checker=__lowerCamelCase , torch_dtype=torch.float16 )
lowercase = DDIMScheduler.from_config(pipe.scheduler.config )
lowercase = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowercase = 'a bowl of fruit'
lowercase = 'a bowl of pears'
lowercase = pipe.generate_mask(
image=self.raw_image , source_prompt=__lowerCamelCase , target_prompt=__lowerCamelCase , generator=__lowerCamelCase , )
lowercase = pipe.invert(
prompt=__lowerCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__lowerCamelCase ).latents
lowercase = pipe(
prompt=__lowerCamelCase , mask_image=__lowerCamelCase , image_latents=__lowerCamelCase , generator=__lowerCamelCase , negative_prompt=__lowerCamelCase , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
lowercase = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5E-1
def __a ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase = torch.manual_seed(0 )
lowercase = StableDiffusionDiffEditPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2-1''' , safety_checker=__lowerCamelCase , torch_dtype=torch.float16 )
lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowercase = 'a bowl of fruit'
lowercase = 'a bowl of pears'
lowercase = pipe.generate_mask(
image=self.raw_image , source_prompt=__lowerCamelCase , target_prompt=__lowerCamelCase , generator=__lowerCamelCase , )
lowercase = pipe.invert(
prompt=__lowerCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__lowerCamelCase , num_inference_steps=25 , ).latents
lowercase = pipe(
prompt=__lowerCamelCase , mask_image=__lowerCamelCase , image_latents=__lowerCamelCase , generator=__lowerCamelCase , negative_prompt=__lowerCamelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
lowercase = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 604 |
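For orientation, the two slow tests above exercise DiffEdit's three-stage flow end to end. A condensed sketch of that flow; the checkpoint and prompts are illustrative, raw_image is a PIL image supplied by the caller, and the scheduler wiring mirrors the tests:

import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

# Stage 1: diff two denoising runs to find where the prompts disagree -> edit mask.
mask = pipe.generate_mask(image=raw_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
# Stage 2: invert the image into latents so the edit stays anchored to the original.
latents = pipe.invert(prompt="a bowl of fruit", image=raw_image, inpaint_strength=0.7).latents
# Stage 3: inpaint only the masked region toward the target prompt.
image = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7).images[0]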
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """
    Wraps the Hugging Face CLIP processor so gradients can flow through image
    preprocessing: text goes through the tokenizer, images through
    differentiable torchvision transforms instead of PIL conversion.
    """

    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self, A=10, A=0.01, A=None, A=None, A=None, A=None, A=None, A=None, A=False, A=True, A="image", A=True, A=False, A=False, A=False, ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = None
SCREAMING_SNAKE_CASE : List[Any] = device if device else get_device()
if vqgan:
SCREAMING_SNAKE_CASE : Optional[Any] = vqgan
else:
SCREAMING_SNAKE_CASE : Tuple = load_vqgan(self.device, conf_path=A, ckpt_path=A )
self.vqgan.eval()
if clip:
SCREAMING_SNAKE_CASE : List[str] = clip
else:
SCREAMING_SNAKE_CASE : Any = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
SCREAMING_SNAKE_CASE : Optional[int] = ProcessorGradientFlow(device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = iterations
SCREAMING_SNAKE_CASE : Tuple = lr
SCREAMING_SNAKE_CASE : Tuple = log
SCREAMING_SNAKE_CASE : str = make_grid
SCREAMING_SNAKE_CASE : Dict = return_val
SCREAMING_SNAKE_CASE : Union[str, Any] = quantize
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decoder.z_shape
def UpperCamelCase_ ( self, A=None, A=None, A=5, A=True ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = []
if output_path is None:
SCREAMING_SNAKE_CASE : int = './animation.gif'
if input_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = self.save_path
SCREAMING_SNAKE_CASE : Optional[Any] = sorted(glob(input_path + '/*' ) )
if not len(A ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(A ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
SCREAMING_SNAKE_CASE : Optional[Any] = total_duration / len(A )
SCREAMING_SNAKE_CASE : int = [frame_duration] * len(A )
if extend_frames:
SCREAMING_SNAKE_CASE : List[str] = 1.5
SCREAMING_SNAKE_CASE : int = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(A ) )
imageio.mimsave(A, A, duration=A )
print(F"gif saved to {output_path}" )
def UpperCamelCase_ ( self, A=None, A=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
SCREAMING_SNAKE_CASE : str = preprocess(Image.open(A ), target_image_size=256 ).to(self.device )
SCREAMING_SNAKE_CASE : Any = preprocess_vqgan(A )
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : Tuple = self.vqgan.encode(A )
return z
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.latent.detach().requires_grad_()
SCREAMING_SNAKE_CASE : Union[str, Any] = base_latent + transform_vector
if self.quantize:
SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.quantize(A )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = trans_latent
return self.vqgan.decode(A )
def UpperCamelCase_ ( self, A, A, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.clip_preprocessor(text=A, images=A, return_tensors='pt', padding=A )
SCREAMING_SNAKE_CASE : str = self.clip(**A )
SCREAMING_SNAKE_CASE : Any = clip_outputs.logits_per_image
if weights is not None:
SCREAMING_SNAKE_CASE : List[Any] = similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self._get_clip_similarity(pos_prompts['prompts'], A, weights=(1 / pos_prompts['weights']) )
if neg_prompts:
SCREAMING_SNAKE_CASE : List[Any] = self._get_clip_similarity(neg_prompts['prompts'], A, weights=neg_prompts['weights'] )
else:
SCREAMING_SNAKE_CASE : str = torch.tensor([1], device=self.device )
SCREAMING_SNAKE_CASE : List[Any] = -torch.log(A ) + torch.log(A )
return loss
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = torch.randn_like(self.latent, requires_grad=A, device=self.device )
SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam([vector], lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
SCREAMING_SNAKE_CASE : Union[str, Any] = self._add_vector(A )
SCREAMING_SNAKE_CASE : Dict = loop_post_process(A )
SCREAMING_SNAKE_CASE : List[str] = self._get_CLIP_loss(A, A, A )
print('CLIP loss', A )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
wandb.init(reinit=A, project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
SCREAMING_SNAKE_CASE : Tuple = Image.open(A )
SCREAMING_SNAKE_CASE : int = image.resize((256, 256) )
wandb.log('Original Image', wandb.Image(A ) )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if not prompts:
return []
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Dict = []
if isinstance(A, A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(A, (tuple, list) ):
SCREAMING_SNAKE_CASE : List[str] = prompt[0]
SCREAMING_SNAKE_CASE : Any = float(prompt[1] )
elif ":" in prompt:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = prompt.split(':' )
SCREAMING_SNAKE_CASE : Any = float(A )
else:
SCREAMING_SNAKE_CASE : Dict = prompt
SCREAMING_SNAKE_CASE : List[Any] = 1.0
processed_prompts.append(A )
weights.append(A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(A, device=self.device ),
}
def UpperCamelCase_ ( self, A, A=None, A=None, A=True, A=False, A=True, A=True, A=None, ):
'''simple docstring'''
if image_path:
SCREAMING_SNAKE_CASE : int = self._get_latent(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(self.latent_dim, device=self.device )
if self.log:
self._init_logging(A, A, A )
assert pos_prompts, "You must provide at least one positive prompt."
SCREAMING_SNAKE_CASE : Dict = self.process_prompts(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.process_prompts(A )
if save_final and save_path is None:
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(A ):
os.makedirs(A )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path + '_' + get_timestamp()
os.makedirs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = save_path
SCREAMING_SNAKE_CASE : List[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(A ) )
SCREAMING_SNAKE_CASE : int = loop_post_process(A )
for iter, transformed_img in enumerate(self._optimize_CLIP(A, A, A ) ):
if show_intermediate:
show_pil(A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'Image': wandb.Image(A )} )
if show_final:
show_pil(A )
if save_final:
transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
| 28 | 0 |
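The process_prompts method in the row above accepts prompts as "text:weight" strings, (text, weight) pairs, or bare strings that default to weight 1.0, with "|" separating multiple prompts. A standalone sketch of that parsing convention (function and variable names are illustrative):

def parse_prompts(prompts):
    """Parse 'a|b:2|c' style prompt specs into parallel text and weight lists."""
    if isinstance(prompts, str):
        prompts = [p.strip() for p in prompts.split("|")]
    texts, weights = [], []
    for prompt in prompts:
        if isinstance(prompt, (tuple, list)):
            text, weight = prompt[0], float(prompt[1])
        elif ":" in prompt:
            text, weight = prompt.split(":")
            weight = float(weight)
        else:
            text, weight = prompt, 1.0
        texts.append(text)
        weights.append(weight)
    return texts, weights


assert parse_prompts("smiling face|old photo:0.5") == (["smiling face", "old photo"], [1.0, 0.5])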
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowerCamelCase__ : Any = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_0_1, 4_0, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}")
            return {"success": success}
        trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
        metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
        p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
        p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
        trainer.args.eval_accumulation_steps = None
| 33 |
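The property this script verifies, reduced to its core: under round-robin sharding across ranks, Trainer.predict must reassemble the per-rank shards back into the original sample order. A minimal single-process sketch of that round trip (pure Python, illustrative, and ignoring the padding a real DistributedSampler adds for uneven shards):

def shard(samples, num_ranks):
    """Round-robin sharding, as a DistributedSampler would do it."""
    return [samples[rank::num_ranks] for rank in range(num_ranks)]


def reassemble(shards, total):
    """Invert the round-robin sharding to recover the original order."""
    out = [None] * total
    for rank, chunk in enumerate(shards):
        for i, sample in enumerate(chunk):
            out[rank + i * len(shards)] = sample
    return out


for length in (101, 40, 7):  # same dataset lengths the script above tests
    samples = list(range(length))
    assert reassemble(shard(samples, 2), length) == samples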
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    r"""
    Wrapper for using multiple ControlNets in one pipeline: each net runs on its
    own conditioning image and the residuals are summed.
    """

    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(self, sample, timestep, encoder_hidden_states, controlnet_cond, conditioning_scale, class_labels=None, timestep_cond=None, attention_mask=None, cross_attention_kwargs=None, guess_mode=False, return_dict=True):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample,
                timestep=timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(self, save_directory, is_main_process=True, save_function=None, safe_serialization=False, variant=None):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant)
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}.")
        return cls(controlnets)
| 28 | 0 |
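The save/load pair above relies purely on a directory-name convention. A small sketch of the paths each side produces, derived from the code as shown (names are illustrative). Note the asymmetry: save_pretrained appends the suffix to the accumulated path while from_pretrained appends it to the base path, so as written the pair only round-trips cleanly for one or two nets:

def save_paths(base_dir: str, count: int) -> list[str]:
    """Directories written by save_pretrained above (note the accumulation)."""
    paths, idx, path = [], 0, base_dir
    for _ in range(count):
        paths.append(path)
        idx += 1
        path = path + f"_{idx}"
    return paths


def load_paths_probed(base_dir: str, count: int) -> list[str]:
    """Directories probed by from_pretrained above."""
    return [base_dir if i == 0 else base_dir + f"_{i}" for i in range(count)]


print(save_paths("controlnet", 3))         # ['controlnet', 'controlnet_1', 'controlnet_1_2']
print(load_paths_probed("controlnet", 3))  # ['controlnet', 'controlnet_1', 'controlnet_2']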
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a_ :Dict = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    """Image processor: shortest-edge resize (scaled by 256/224), center crop, rescale, normalize."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs)
def lowercase_ ( self : Optional[int], _snake_case : Tuple, _snake_case : Any, _snake_case : Any = None, **_snake_case : Any, ) ->Optional[Any]:
snake_case__ : Optional[int] = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(_snake_case, size=(size['height'], size['width']), data_format=_snake_case, **_snake_case )
def lowercase_ ( self : List[Any], _snake_case : List[Any], _snake_case : Optional[Any], _snake_case : List[Any] = None, **_snake_case : int, ) ->Any:
return rescale(_snake_case, scale=_snake_case, data_format=_snake_case, **_snake_case )
def lowercase_ ( self : Optional[int], _snake_case : Dict, _snake_case : Union[str, Any], _snake_case : Union[str, Any], _snake_case : Optional[Any] = None, **_snake_case : Any, ) ->int:
return normalize(_snake_case, mean=_snake_case, std=_snake_case, data_format=_snake_case, **_snake_case )
def lowercase_ ( self : Tuple, _snake_case : Optional[Any], _snake_case : List[Any] = None, _snake_case : Tuple = None, _snake_case : List[str] = None, _snake_case : List[str] = None, _snake_case : List[str] = None, _snake_case : Dict = None, _snake_case : Any = None, _snake_case : Optional[Any] = None, _snake_case : List[Any] = None, _snake_case : Optional[Any] = None, _snake_case : List[str] = None, _snake_case : List[str] = ChannelDimension.FIRST, **_snake_case : int, ) ->Union[str, Any]:
snake_case__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
snake_case__ : Optional[Any] = resample if resample is not None else self.resample
snake_case__ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ : Optional[Any] = image_mean if image_mean is not None else self.image_mean
snake_case__ : Optional[Any] = image_std if image_std is not None else self.image_std
snake_case__ : Dict = size if size is not None else self.size
snake_case__ : List[str] = get_size_dict(_snake_case, default_to_square=_snake_case )
snake_case__ : Tuple = crop_size if crop_size is not None else self.crop_size
snake_case__ : Union[str, Any] = get_size_dict(_snake_case, param_name='crop_size' )
snake_case__ : int = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
snake_case__ : Tuple = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
snake_case__ : List[str] = [self.resize(_snake_case, _snake_case, _snake_case ) for image in images]
if do_center_crop:
snake_case__ : int = [self.center_crop(_snake_case, _snake_case ) for image in images]
if do_rescale:
snake_case__ : int = [self.rescale(_snake_case, _snake_case ) for image in images]
if do_normalize:
snake_case__ : List[str] = [self.normalize(_snake_case, _snake_case, _snake_case ) for image in images]
snake_case__ : Optional[int] = [to_channel_dimension_format(_snake_case, _snake_case ) for image in images]
snake_case__ : Tuple = {'pixel_values': images}
return BatchFeature(data=_snake_case, tensor_type=_snake_case )
| 478 |
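The one non-obvious step in the resize above: for a "shortest_edge" request, the target edge is first scaled by 256/224, so the image is resized slightly larger and the 224x224 center crop then trims it back down. A standalone sketch of the output-size arithmetic (the library helper rounds slightly differently; this is the gist, assuming an aspect-ratio-preserving resize on the shorter edge):

def shortest_edge_output_size(height: int, width: int, shortest_edge: int = 224) -> tuple[int, int]:
    # Scale the requested edge up by 256/224 before the center crop.
    target = int((256 / 224) * shortest_edge)
    scale = target / min(height, width)
    return round(height * scale), round(width * scale)


print(shortest_edge_output_size(480, 640))  # shorter edge becomes 256: (256, 341)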
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney").T
    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0)
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
def __call__( self, A, A = None, A = True, A = None, A = False, A = False, **A, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
F" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
SCREAMING_SNAKE_CASE : List[Any] = isinstance(A, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(A, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A, np.ndarray ):
SCREAMING_SNAKE_CASE : Any = np.asarray(A, dtype=np.floataa )
elif isinstance(A, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
SCREAMING_SNAKE_CASE : int = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], A ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(A, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
SCREAMING_SNAKE_CASE : Tuple = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
SCREAMING_SNAKE_CASE : List[Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
SCREAMING_SNAKE_CASE : Tuple = np.array(A ).astype(np.floataa )
# convert into correct format for padding
SCREAMING_SNAKE_CASE : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
SCREAMING_SNAKE_CASE : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
SCREAMING_SNAKE_CASE : Optional[int] = padded_audio_features * self.padding_value
for i in range(len(A ) ):
SCREAMING_SNAKE_CASE : Optional[int] = audio_features[i]
SCREAMING_SNAKE_CASE : Union[str, Any] = feature
# return as BatchFeature
if return_attention_mask:
SCREAMING_SNAKE_CASE : Any = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
SCREAMING_SNAKE_CASE : Dict = {'audio_values': padded_audio_features}
SCREAMING_SNAKE_CASE : str = BatchFeature(data=A, tensor_type=A )
return encoded_inputs
| 28 | 0 |
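The normalization at the end of _np_extract_fbank_features maps a dB-scaled spectrogram into [-1, 1]: subtract 20 dB, divide by 40, clip to [-2, 0], then add 1. A worked check of that range mapping (the dB values below are illustrative):

import numpy as np


def normalize_db(log_spec_db: np.ndarray) -> np.ndarray:
    # Shift by -20 dB, scale by 1/40, clip to [-2, 0], shift into [-1, 1].
    x = (log_spec_db - 20.0) / 40.0
    return np.clip(x, -2.0, 0.0) + 1.0


db = np.array([-80.0, -60.0, 0.0, 20.0, 40.0])
print(normalize_db(db))  # [-1.  -1.   0.5  1.   1. ]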
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal: left subtree, node, right subtree.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build a BST from the input, then read it back in sorted order.
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
| 537 |
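A quick usage check, assuming the Node/tree_sort definitions from the row above are in scope, plus the standard caveat that an unbalanced BST degrades tree sort to O(n^2):

assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == [1, 2, 3, 9, 10, 13, 14]
assert tree_sort([]) == []
# Worst case: already-sorted input builds a right-leaning chain, one level per
# element, so the n inserts cost O(n^2) total (and inorder recurses n deep).
assert tree_sort(list(range(50))) == list(range(50))
# Note: equal keys overwrite the existing node, so duplicates are dropped.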
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost])
        adjacency[nodeb].append([nodea, cost])
    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 28 | 0 |
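The imported prisms_algorithm is not shown in this row. As a reference point, a minimal heap-based Prim's that consumes the same {node: [[neighbor, cost], ...]} adjacency shape and yields MST edges as (u, v, cost) triples; this is a sketch, not the imported implementation:

import heapq


def prim_mst(adjacency: dict) -> list[tuple[int, int, int]]:
    start = next(iter(adjacency))
    visited = {start}
    heap = [(cost, start, nbr) for nbr, cost in adjacency[start]]
    heapq.heapify(heap)
    tree = []
    while heap:
        cost, u, v = heapq.heappop(heap)
        if v in visited:  # stale entry, endpoint already in the tree
            continue
        visited.add(v)
        tree.append((u, v, cost))
        for nbr, c in adjacency[v]:
            if nbr not in visited:
                heapq.heappush(heap, (c, v, nbr))
    return tree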
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        img_path = img_list[idx]
        path_list.append(img_path)
        img_annos = anno_list[idx]
        img = cv2.imread(img_path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 29 |
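The annotation update above is a reflection of YOLO-normalized box centers: a horizontal flip maps x_center to 1 - x_center, a vertical flip maps y_center to 1 - y_center, and widths and heights are unchanged. The rule in isolation:

def flip_yolo_bbox(bbox: list[float], flip_type: int = 1) -> list[float]:
    """bbox = [class_id, x_center, y_center, width, height], coords in [0, 1]."""
    cls, x, y, w, h = bbox
    if flip_type == 1:  # horizontal flip
        return [cls, 1 - x, y, w, h]
    if flip_type == 0:  # vertical flip
        return [cls, x, 1 - y, w, h]
    return bbox


assert flip_yolo_bbox([0, 0.25, 0.6, 0.1, 0.2], 1) == [0, 0.75, 0.6, 0.1, 0.2]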
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : Optional[Any] = UNet2DConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, )
SCREAMING_SNAKE_CASE : int = DDIMScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, )
SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler(
beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_zero=A, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, )
SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A )
SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE : int = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0]
        SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uint8(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Dict = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self, A, A=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A )
SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
        SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uint8(A ) ).convert('RGB' )
if str(A ).startswith('mps' ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A )
else:
SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A )
SCREAMING_SNAKE_CASE : Any = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not hasattr(self.pipeline_class, '_optional_components' ):
return
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A, A, A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A )
SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0]
SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max()
self.assertLess(A, 1E-4 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = 'cpu'
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A )
SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape, (1, 16, 16) )
SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 )
SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
self.assertEqual(mask[0, -3, -4], 0 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'cpu'
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'}
SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A )
SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A )
SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A )
SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape, (2, 32, 32, 3) )
SCREAMING_SNAKE_CASE : Tuple = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], )
SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A, 1E-3 )
@require_torch_gpu
@slow
class _a ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) )
SCREAMING_SNAKE_CASE : List[str] = raw_image
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.float16 )
SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears'
SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents
SCREAMING_SNAKE_CASE : List[str] = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : List[Any] = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.float16 )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
SCREAMING_SNAKE_CASE : str = 'a bowl of fruit'
SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears'
SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask(
image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert(
prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents
SCREAMING_SNAKE_CASE : str = pipe(
prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0]
SCREAMING_SNAKE_CASE : Tuple = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 28 | 0 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
@register_to_config
def __init__( self : Any , _lowercase : List[Any] = 16 , _lowercase : Optional[Any] = 88 , _lowercase : str = None , _lowercase : Union[str, Any] = None , _lowercase : Tuple = 1 , _lowercase : Optional[int] = 0.0 , _lowercase : int = 32 , _lowercase : List[str] = None , _lowercase : List[str] = False , _lowercase : Optional[Any] = None , _lowercase : int = "geglu" , _lowercase : Dict = True , _lowercase : List[str] = True , ):
"""simple docstring"""
super().__init__()
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = attention_head_dim
UpperCAmelCase__ = num_attention_heads * attention_head_dim
UpperCAmelCase__ = in_channels
UpperCAmelCase__ = torch.nn.GroupNorm(num_groups=_lowercase , num_channels=_lowercase , eps=1E-6 , affine=_lowercase )
UpperCAmelCase__ = nn.Linear(_lowercase , _lowercase )
# 3. Define transformers blocks
UpperCAmelCase__ = nn.ModuleList(
[
BasicTransformerBlock(
_lowercase , _lowercase , _lowercase , dropout=_lowercase , cross_attention_dim=_lowercase , activation_fn=_lowercase , attention_bias=_lowercase , double_self_attention=_lowercase , norm_elementwise_affine=_lowercase , )
for d in range(_lowercase )
] )
UpperCAmelCase__ = nn.Linear(_lowercase , _lowercase )
def _UpperCAmelCase ( self : Any , _lowercase : List[Any] , _lowercase : Dict=None , _lowercase : Union[str, Any]=None , _lowercase : int=None , _lowercase : Dict=1 , _lowercase : List[Any]=None , _lowercase : str = True , ):
"""simple docstring"""
UpperCAmelCase__ = hidden_states.shape
UpperCAmelCase__ = batch_frames // num_frames
UpperCAmelCase__ = hidden_states
UpperCAmelCase__ = hidden_states[None, :].reshape(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
UpperCAmelCase__ = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
UpperCAmelCase__ = self.norm(_lowercase )
UpperCAmelCase__ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _lowercase , _lowercase )
UpperCAmelCase__ = self.proj_in(_lowercase )
# 2. Blocks
for block in self.transformer_blocks:
UpperCAmelCase__ = block(
_lowercase , encoder_hidden_states=_lowercase , timestep=_lowercase , cross_attention_kwargs=_lowercase , class_labels=_lowercase , )
# 3. Output
UpperCAmelCase__ = self.proj_out(_lowercase )
UpperCAmelCase__ = (
hidden_states[None, None, :]
.reshape(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
UpperCAmelCase__ = hidden_states.reshape(_lowercase , _lowercase , _lowercase , _lowercase )
UpperCAmelCase__ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=_lowercase )
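# Minimal smoke test for the temporal transformer above (an illustrative
# addition, not part of the original module): two clips of four frames with
# 32-channel 8x8 feature maps, folded into the batch axis as the model expects.
if __name__ == "__main__":
    model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=8, in_channels=32, num_layers=1)
    frames = torch.randn(2 * 4, 32, 8, 8)  # (batch * num_frames, channel, height, width)
    sample = model(frames, num_frames=4).sample
    assert sample.shape == frames.shape  # temporal mixing preserves the spatial shape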
| 475 |
def solution(limit: int = 1_00_00_00) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit via a sieve: phi[p] == p - 1
    exactly when p is prime, at which point all multiples of p are updated."""
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so sieve its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
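# Sanity check (an illustrative addition, not part of the original solution):
# for a small limit the sieve must agree with a brute-force count of reduced
# proper fractions n/d with 0 < n < d <= limit.
if __name__ == "__main__":
    from math import gcd

    assert solution(8) == sum(1 for d in range(2, 9) for n in range(1, d) if gcd(n, d) == 1)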
| 28 | 0 |
import inspect
import unittest
class DependencyVersionsTest(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                # Dummy placeholder classes list the backends they stand in for.
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
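# Context for the test above (an illustrative sketch, not taken from this
# file): each dummy placeholder class in diffusers declares the backends it
# stands in for, roughly like this, and that list is what `_backends` exposes:
#
# class StableDiffusionKDiffusionPipeline(metaclass=DummyObject):
#     _backends = ["torch", "transformers", "k_diffusion"]
#
#     def __init__(self, *args, **kwargs):
#         requires_backends(self, ["torch", "transformers", "k_diffusion"])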
| 392 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=False), [0, 31_414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=False),
            [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8')[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({'bos_token': '<s>'})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask, lstrip=True, rstrip=False)})  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']),
                    sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']), )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets)

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['trim_offsets'], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Verify that the offsets are well adapted to the arguments `add_prefix_space` and `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
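# Background for the space-encoding assertions above (an illustrative addition):
# byte-level BPE remaps raw bytes to printable characters, and the space byte
# 0x20 becomes chr(0x20 + 0x100) == '\u0120' ('Ġ'). That is why vocab entries
# such as '\u0120low' stand for ' low', and why the tests compare the first
# character of a token against tokenizer.byte_encoder[' '.encode('utf-8')[0]].
# assert chr(0x20 + 0x100) == '\u0120'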
| 28 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints where one sequence is a complete subset of another,
        # since fulfilling the shorter one would ambiguously "complete" the constraint.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
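# Illustrative usage sketch (not part of the test file; the model name and
# phrases are placeholders): a DisjunctiveConstraint can be handed to
# `generate` through the `constraints` argument so that constrained beam
# search must emit one of the listed phrasings.
#
# from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("t5-small")
# model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
# phrases = tokenizer(["rained", "was raining"], add_special_tokens=False).input_ids
# inputs = tokenizer("translate English to German: It rained all day.", return_tensors="pt")
# out = model.generate(**inputs, constraints=[DisjunctiveConstraint(phrases)], num_beams=4)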
| 652 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type='text_time', addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64, )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule='scaled_linear', timestep_spacing='leading', )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=32, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=False)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip', local_files_only=False)

        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'text_encoder_2': text_encoder_2,
            'tokenizer_2': tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 5.0,
            'output_type': 'numpy',
            'strength': 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['this is a negative prompt']
        inputs['negative_prompt'] = negative_prompt
        inputs['prompt'] = 3 * [inputs['prompt']]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ['this is a negative prompt']
        prompt = 3 * [inputs.pop('prompt')]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device='cpu', dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            'prompt': 'a photograph of an astronaut riding a horse',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def test_stable_diffusion_default_ddim(self):
        pipe = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
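# Shape note (an illustrative addition, not part of the test file): the slow
# test seeds (1, 4, 64, 64) latents because Stable Diffusion's VAE downsamples
# by a factor of 8, so 64x64 latents decode to the asserted 512x512 image.
def _latent_shape(height, width, vae_scale_factor=8, latent_channels=4):
    return (1, latent_channels, height // vae_scale_factor, width // vae_scale_factor)


assert _latent_shape(512, 512) == (1, 4, 64, 64)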
| 28 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
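# Behavior sketch for the lazy module above (illustrative, not part of the
# file): none of the heavy imports run at module load time; _LazyModule
# resolves a name on first attribute access, e.g.:
#
# import importlib
# mod = importlib.import_module(__name__)            # this __init__ module
# cfg = getattr(mod, "TrajectoryTransformerConfig")  # the real import happens here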
| 443 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = 'char'
    BPE = 'bpe'
    WORDPIECE = 'wp'


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ['image_processor', 'char_tokenizer']
    image_processor_class = 'ViTImageProcessor'
    char_tokenizer_class = 'MgpstrTokenizer'

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2')
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, sequences):
        # Each sample is decoded by all three heads; the most confident wins.
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, 'char')
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, 'bpe')
        wp_strs, wp_scores = self._decode_helper(wp_preds, 'wp')

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out['generated_text'] = final_strs
        out['scores'] = final_scores
        out['char_preds'] = char_strs
        out['bpe_preds'] = bpe_strs
        out['wp_preds'] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
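# Illustrative usage sketch (not part of the processor file; shapes and vocab
# sizes are placeholders): batch_decode consumes the three logit heads of an
# MGP-STR model and keeps, per sample, the head with the highest confidence.
#
# import torch
# char_logits = torch.rand(1, 27, 38)      # (batch, seq_len, char vocab)
# bpe_logits = torch.rand(1, 27, 50257)    # gpt2 vocab
# wp_logits = torch.rand(1, 27, 30522)     # bert-base-uncased vocab
# out = processor.batch_decode((char_logits, bpe_logits, wp_logits))
# print(out['generated_text'], out['scores'])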
| 28 | 0 |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/transformers''' )
    open_issues = repo.get_issues(state='''open''' )

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 639 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None,):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
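# Example invocation (illustrative; the script name and paths are placeholders):
# python convert_hifigan_checkpoint.py \
#     --checkpoint_path ./hifigan_generator.ckpt \
#     --stats_path ./stats.npy \
#     --pytorch_dump_folder_path ./speecht5_hifigan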
| 28 | 0 |
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, "wb") as fp:
fp.write(image_data)
print(F"Done. Image saved to disk as {file_name}.")
| 48 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any):
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any):
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """
    >>> test_singly_linked_list()
    """
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """
    This section of the test uses varying data types for input.
    >>> test_singly_linked_list_2()
    """
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
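# Complexity note (an illustrative addition): __len__ walks the entire list,
# so insert_tail/delete_tail and indexed access are O(n); reverse() is a
# single O(n) pass using O(1) extra space.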
| 28 | 0 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = 'sequence-classification'

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]['scheduler']
        tensorboard_logs = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info('Loading features from cached file %s', cached_features_file)
            else:
                logger.info('Creating features from dataset file at %s', args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == 'dev'
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, )
                logger.info('Saving features into cached file %s', cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        "Load datasets. Called after prepare data."

        # We test on the dev set to compare to benchmarks without having to submit to the GLUE server
        mode = 'dev' if mode == 'test' else mode

        cached_features_file = self._feature_file(mode)
        logger.info('Loading features from cached file %s', cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=shuffle, )

    def validation_step(self, batch, batch_idx):
        inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs['token_type_ids'] = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs['labels'].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x['pred'] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x['target'] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret['log'] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret['log']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret['log']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            '--max_seq_length', default=1_28, type=int, help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ), )
        parser.add_argument(
            '--task', default='', type=str, required=True, help='The GLUE task to run', )
        parser.add_argument(
            '--gpus', default=0, type=int, help='The number of GPUs allocated for this, it is by default 0 meaning none', )
        parser.add_argument(
            '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')

        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            './results', f'{args.task}_{time.strftime("%Y%m%d_%H%M%S")}', )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 604 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ):
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self, image_inputs, batched=False ):
        '''simple docstring'''
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w )
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h )
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values, key=lambda item : item[0] )[0]
            expected_width = max(expected_values, key=lambda item : item[1] )[1]
        return expected_height, expected_width
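# The expected sizes above mirror DETR-style "shortest edge" resizing: the
# shorter side is scaled to size["shortest_edge"], the longer side scales
# proportionally, and batched inputs are padded up to the batch maximum.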
@require_torch
@require_vision
class YolosImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = YolosImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, 'image_mean' ) )
        self.assertTrue(hasattr(image_processing, 'image_std' ) )
        self.assertTrue(hasattr(image_processing, 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing, 'do_resize' ) )
        self.assertTrue(hasattr(image_processing, 'size' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1_333} )
        self.assertEqual(image_processor.do_pad, True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84} )
        self.assertEqual(image_processor.do_pad, False )
    def test_batch_feature( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A, Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(A, batched=A )
SCREAMING_SNAKE_CASE : Tuple = image_processing(A, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, numpify=A )
for image in image_inputs:
self.assertIsInstance(A, np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=A, torchify=A )
for image in image_inputs:
self.assertIsInstance(A, torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
# Test batched
SCREAMING_SNAKE_CASE : Optional[int] = image_processing(A, return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.image_processor_tester.get_expected_values(A, batched=A )
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
), )
    def test_equivalence_padding( self ):
        '''simple docstring'''
        image_processing_a = self.image_processing_class(**self.image_processor_dict )
        image_processing_b = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor )
        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_a.pad(image_inputs, return_tensors='pt' )
        encoded_images = image_processing_b(image_inputs, return_tensors='pt' )
        self.assertTrue(
            torch.allclose(encoded_images_with_method['pixel_values'], encoded_images['pixel_values'], atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() )
SCREAMING_SNAKE_CASE : Any = {'image_id': 39_769, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE : Any = YolosImageProcessor.from_pretrained('hustvl/yolos-small' )
SCREAMING_SNAKE_CASE : int = image_processing(images=A, annotations=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : int = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify orig_size
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : str = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r' ) as f:
SCREAMING_SNAKE_CASE : int = json.loads(f.read() )
SCREAMING_SNAKE_CASE : List[Any] = {'file_name': '000000039769.png', 'image_id': 39_769, 'segments_info': target}
SCREAMING_SNAKE_CASE : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE : int = YolosImageProcessor(format='coco_panoptic' )
SCREAMING_SNAKE_CASE : str = image_processing(images=A, annotations=A, masks_path=A, return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding['pixel_values'].shape, A )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], A, atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'], A ) )
# verify boxes
SCREAMING_SNAKE_CASE : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], A, atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], A ) )
# verify is_crowd
SCREAMING_SNAKE_CASE : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], A ) )
# verify class_labels
SCREAMING_SNAKE_CASE : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], A ) )
# verify masks
SCREAMING_SNAKE_CASE : Optional[int] = 822_873
self.assertEqual(encoding['labels'][0]['masks'].sum().item(), A )
# verify orig_size
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], A ) )
# verify size
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'], A ) )
| 28 | 0 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def SCREAMING_SNAKE_CASE ( args ):
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
IMPORT_ERROR_MESSAGE = """\ntransformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"""
class ConvertCommand (BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand( parser ):
        train_parser = parser.add_parser(
            '''convert''' , help='''CLI tool to convert model weights from original author checkpoints to Transformers PyTorch checkpoints.''' , )
        train_parser.add_argument('''--model_type''' , type=str , required=True , help='''Model\'s type.''' )
        train_parser.add_argument(
            '''--tf_checkpoint''' , type=str , required=True , help='''TensorFlow checkpoint path or folder.''' )
        train_parser.add_argument(
            '''--pytorch_dump_output''' , type=str , required=True , help='''Path to the PyTorch saved model output.''' )
        train_parser.add_argument('''--config''' , type=str , default='''''' , help='''Configuration file path or folder.''' )
        train_parser.add_argument(
            '''--finetuning_task_name''' , type=str , default=None , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
        train_parser.set_defaults(func=SCREAMING_SNAKE_CASE )
    def __init__( self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args , ):
        self._logger = logging.get_logger('''transformers-cli/converting''' )
        self._logger.info(F"""Loading model {model_type}""" )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
if "ckpt" in self._tf_checkpoint.lower():
snake_case__ = self._tf_checkpoint
snake_case__ = ''
else:
snake_case__ = self._tf_checkpoint
snake_case__ = ''
convert_transfo_xl_checkpoint_to_pytorch(
_a , self._config , self._pytorch_dump_output , _a )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]''' )
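# Illustrative CLI invocation of the command registered above (paths are
# hypothetical):
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin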
| 33 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets( datasets: List[DatasetType] ,probabilities: Optional[List[float]] = None ,seed: Optional[int] = None ,info: Optional[DatasetInfo] = None ,split: Optional[NamedSplit] = None ,stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" ,):
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.' )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset ,(Dataset, IterableDataset) ):
            if isinstance(dataset ,(DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset ,Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset ,dataset_type ):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy." )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets ,probabilities ,seed ,info=info ,split=split ,stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets ,probabilities ,seed ,info=info ,split=split ,stopping_strategy=stopping_strategy )
def concatenate_datasets( dsets: List[DatasetType] ,info: Optional[DatasetInfo] = None ,split: Optional[NamedSplit] = None ,axis: int = 0 ,):
    """simple docstring"""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.' )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset ,(Dataset, IterableDataset) ):
            if isinstance(dataset ,(DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset )}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset ) )}']" )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}." )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset ,Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset ,dataset_type ):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects." )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets ,info=info ,split=split ,axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets ,info=info ,split=split ,axis=axis )
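# Minimal usage sketch of the two helpers above (assumes two in-memory
# datasets with the same features):
#   from datasets import Dataset
#   ds_a = Dataset.from_dict({"a": [0, 1]})
#   ds_b = Dataset.from_dict({"a": [10, 11]})
#   mixed = interleave_datasets([ds_a, ds_b], probabilities=[0.5, 0.5], seed=42)
#   joined = concatenate_datasets([ds_a, ds_b])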
| 28 | 0 |
from math import ceil, sqrt
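# Project Euler 173: count hollow square "laminae" that can be built from at
# most `limit` tiles. A lamina with outer width w and hole width h uses
# w**2 - h**2 tiles, and h must have the same parity as w.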
def solution (limit : int = 1_0_0_0_0_0_0 ):
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(F"""{solution() = }""")
| 478 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester ( ConfigTester ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A, 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(A, 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(A, 'num_attention_heads' ) )
class MobileViTModelTester :
'''simple docstring'''
    def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
        '''simple docstring'''
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model( self, config, pixel_values, labels, pixel_labels ):
        '''simple docstring'''
        model = MobileViTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
    def create_and_check_for_image_classification( self, config, pixel_values, labels, pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values, labels=labels )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self, config, pixel_values, labels, pixel_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
        result = model(pixel_values, labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MobileViTModelTester(self )
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False )
    def test_config( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(A )
SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Any = ['pixel_values']
self.assertListEqual(arg_names[:1], A )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCamelCase_ ( self ):
'''simple docstring'''
def check_hidden_states_output(A, A, A ):
SCREAMING_SNAKE_CASE : Any = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**self._prepare_for_class(A, A ) )
SCREAMING_SNAKE_CASE : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE : List[str] = 5
self.assertEqual(len(A ), A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE : int = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = True
check_hidden_states_output(A, A, A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
check_hidden_states_output(A, A, A )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def lowercase__( ):
"""simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(A )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Dict = prepare_img()
SCREAMING_SNAKE_CASE : Dict = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Tuple = model(**A )
# verify the logits
SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, A )
SCREAMING_SNAKE_CASE : int = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = model.to(A )
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : str = prepare_img()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Dict = model(**A )
SCREAMING_SNAKE_CASE : List[str] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, A )
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[
[[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
[[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
[[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
], device=A, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], A, atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : List[str] = model.to(A )
SCREAMING_SNAKE_CASE : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : Any = image_processor(images=A, return_tensors='pt' ).to(A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**A )
SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : Dict = image_processor.post_process_semantic_segmentation(outputs=A, target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE : Dict = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, A )
SCREAMING_SNAKE_CASE : Tuple = image_processor.post_process_semantic_segmentation(outputs=A )
SCREAMING_SNAKE_CASE : Any = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, A )
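# The integration tests above are gated behind @slow; they only run when the
# RUN_SLOW=1 environment variable is set, e.g.:
#   RUN_SLOW=1 python -m pytest tests/models/mobilevit/test_modeling_mobilevit.py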
| 28 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class XLMConfig ( PretrainedConfig ):
    model_type = 'xlm'
    attribute_map = {
        'hidden_size': 'emb_dim',
        'num_attention_heads': 'n_heads',
        'num_hidden_layers': 'n_layers',
        'n_words': 'vocab_size', # For backward compatibility
    }
    def __init__( self , vocab_size=3_0145 , emb_dim=2048 , n_layers=12 , n_heads=16 , dropout=0.1 , attention_dropout=0.1 , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=1 , use_lang_emb=True , max_position_embeddings=512 , embed_init_std=2048**-0.5 , layer_norm_eps=1e-1_2 , init_std=0.0_2 , bos_index=0 , eos_index=1 , pad_index=2 , unk_index=3 , mask_index=5 , is_encoder=True , summary_type="first" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , start_n_top=5 , end_n_top=5 , mask_token_id=0 , lang_id=0 , pad_token_id=2 , bos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id
        if "n_words" in kwargs:
            self.n_words = kwargs['''n_words''']
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , **kwargs )
class XLMOnnxConfig ( OnnxConfig ):
@property
    def inputs( self ):
if self.task == "multiple-choice":
lowerCamelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
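# Minimal usage sketch (XLMConfig/XLMModel are the public transformers names;
# "xlm-mlm-en-2048" is one of the checkpoints listed above):
#   from transformers import XLMConfig, XLMModel
#   config = XLMConfig.from_pretrained("xlm-mlm-en-2048")
#   model = XLMModel(config)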
| 29 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    """good first issue""",
    """feature request""",
    """wip""",
]
def main():
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/accelerate''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
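# Intended to run on a schedule (e.g. a daily GitHub Actions cron job) with a
# GITHUB_TOKEN that has permission to comment on and close issues in the repo.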
| 29 | 1 |
"""simple docstring"""
from math import ceil
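# Project Euler 28: sum the numbers on both diagonals of an n x n clockwise
# number spiral. Ring i has side length s = 2*i + 1 and its four corners sum to
# 4*s**2 - 12*i, which the loop below accumulates incrementally.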
def solution ( n = 1_001 ):
    total = 1
    for i in range(1 ,int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 29 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k ,None )
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size ,emb_size ,bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk( checkpoint_path ,hf_config_path="facebook/mbart-large-en-ro" ,finetuned=False ,mbart_aa=False ):
    state_dict = torch.load(checkpoint_path ,map_location='''cpu''' )['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path ,vocab_size=vocab_size )
    if mbart_aa and finetuned:
        mbart_config.activation_function = '''relu'''
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
A_ = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
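# Illustrative invocation (the script file name and checkpoint path are
# hypothetical):
#   python convert_mbart_checkpoint.py ./mbart.cc25/model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25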
| 29 | 1 |
"""simple docstring"""
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
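# The hypothesis is linear: h(x) = theta_0 + theta_1*x_1 + theta_2*x_2 + theta_3*x_3,
# with parameter_vector holding [theta_0, theta_1, theta_2, theta_3]. Batch
# gradient descent below recomputes the derivative over the full training set
# on every iteration.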
def _error( example_no ,data_set="train" ):
    return calculate_hypothesis_value(example_no ,data_set ) - output(
        example_no ,data_set )
def _hypothesis_value( data_input_tuple ):
    hyp_val = 0
    for i in range(len(data_input_tuple ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output( example_no ,data_set ):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value( example_no ,data_set ):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def summation_of_cost_derivative( index ,end=m ):
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def get_cost_derivative( index ):
    cost_derivative_value = summation_of_cost_derivative(index ,m ) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 ,len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector ,temp_parameter_vector ,atol=absolute_error_limit ,rtol=relative_error_limit ,):
            break
        parameter_vector = temp_parameter_vector
    print(('''Number of iterations:''', j) )
def test_gradient_descent():
    for i in range(len(test_data ) ):
        print(('''Actual output value:''', output(i ,'''test''' )) )
        print(('''Hypothesis output:''', calculate_hypothesis_value(i ,'''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
| 29 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_nllb_moe"""] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
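# With this lazy module in place, importing transformers.models.nllb_moe is
# cheap: the torch-dependent symbols listed above are only imported on first
# attribute access.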
| 29 | 1 |
"""simple docstring"""
def solution ( limit = 50_000_000 ):
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2) )
    primes = set(range(3 ,prime_square_limit + 1 ,2 ) )
    primes.add(2 )
    for p in range(3 ,prime_square_limit + 1 ,2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p ,prime_square_limit + 1 ,p ) ) )
    for prime_a in primes:
        square = prime_a * prime_a
        for prime_b in primes:
            cube = prime_b * prime_b * prime_b
            if square + cube >= limit - 16:
                break
            for prime_c in primes:
                tetr = prime_c * prime_c * prime_c * prime_c
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total )
    return len(ret )
if __name__ == "__main__":
print(f"{solution() = }")
| 29 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest ( SchedulerCommonTest ):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config( self , **kwargs ):
        config = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0_0_0_1,
            '''beta_end''': 0.0_2,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**kwargs )
        return config
def UpperCAmelCase__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCamelCase_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase )
lowerCamelCase_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(UpperCAmelCase )
with self.assertRaises(UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCAmelCase , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
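# Sketch of the denoising loop these tests exercise (the model call is a
# stand-in for any noise-prediction network):
#   scheduler = DDPMScheduler(num_train_timesteps=1000)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       noise_pred = model(sample, t)  # hypothetical model
#       sample = scheduler.step(noise_pred, t, sample).prev_sample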
| 29 | 1 |
"""simple docstring"""
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError ( RuntimeError ):
pass
def gen( shards ):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main():
    rank = int(os.environ['''RANK'''] )
    world_size = int(os.environ['''WORLD_SIZE'''] )
    parser = ArgumentParser()
    parser.add_argument('''--streaming''' ,type=bool )
    parser.add_argument('''--local_rank''' ,type=int )
    parser.add_argument('''--num_workers''' ,type=int ,default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {'''shards''': [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen ,gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds ,rank=rank ,world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds ,num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}" )
if __name__ == "__main__":
main()
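# Typically launched under torchrun so that RANK and WORLD_SIZE are set, e.g.:
#   torchrun --nproc_per_node=2 this_script.py --streaming True --num_workers 2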
| 29 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments ( TrainingArguments ):
    sortish_sampler: bool = field(default=False , metadata={'help': 'Whether to use SortishSampler or not.'} )
    predict_with_generate: bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    generation_max_length: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        } , )
    def to_dict( self ):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d
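# Example (public transformers API; values are illustrative):
#   args = Seq2SeqTrainingArguments(
#       output_dir="out", predict_with_generate=True,
#       generation_max_length=128, generation_num_beams=4,
#   )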
| 29 | 1 |
"""simple docstring"""
from manim import *
class __lowerCamelCase ( Scene ):
    def construct( self ):
lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 )
lowerCamelCase_ = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowerCamelCase_ = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowerCamelCase_ = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowerCamelCase_ = Text('''CPU''' , font_size=24 )
lowerCamelCase_ = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase )
lowerCamelCase_ = [mem.copy() for i in range(4 )]
lowerCamelCase_ = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowerCamelCase_ = Text('''GPU''' , font_size=24 )
lowerCamelCase_ = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowerCamelCase_ = Text('''Model''' , font_size=24 )
lowerCamelCase_ = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase )
lowerCamelCase_ = []
for i, rect in enumerate(UpperCAmelCase ):
rect.set_stroke(UpperCAmelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
lowerCamelCase_ = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=UpperCAmelCase , buff=0.0 )
self.add(UpperCAmelCase )
cpu_targs.append(UpperCAmelCase )
lowerCamelCase_ = [mem.copy() for i in range(6 )]
lowerCamelCase_ = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
lowerCamelCase_ = Text('''Loaded Checkpoint''' , font_size=24 )
lowerCamelCase_ = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , aligned_edge=UpperCAmelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
lowerCamelCase_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase_ = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
lowerCamelCase_ = MarkupText(
f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase ) , Write(UpperCAmelCase ) )
self.play(Write(UpperCAmelCase , run_time=1 ) , Create(UpperCAmelCase , run_time=1 ) )
lowerCamelCase_ = []
lowerCamelCase_ = []
for i, rect in enumerate(UpperCAmelCase ):
lowerCamelCase_ = fill.copy().set_fill(UpperCAmelCase , opacity=0.7 )
target.move_to(UpperCAmelCase )
first_animations.append(GrowFromCenter(UpperCAmelCase , run_time=1 ) )
lowerCamelCase_ = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(UpperCAmelCase , run_time=1.5 ) )
self.play(*UpperCAmelCase )
self.play(*UpperCAmelCase )
self.wait()
| 29 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
A_ = True
except ImportError:
A_ = False
try:
from torch.hub import _get_torch_home
A_ = _get_torch_home()
except ImportError:
A_ = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
A_ = os.path.join(torch_cache_home, """transformers""")
A_ = """https://cdn.huggingface.co"""
A_ = """https://s3.amazonaws.com/models.huggingface.co/bert"""
A_ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
A_ = os.path.join(PATH, """config.yaml""")
A_ = os.path.join(PATH, """attributes.txt""")
A_ = os.path.join(PATH, """objects.txt""")
A_ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
A_ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
A_ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
A_ = """pytorch_model.bin"""
A_ = """config.yaml"""
def lowercase ( lowerCAmelCase__=OBJECTS ,lowerCAmelCase__=ATTRIBUTES ):
lowerCamelCase_ = []
with open(lowerCAmelCase__ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
lowerCamelCase_ = []
with open(lowerCAmelCase__ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
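    # Format note (inferred from the parsing above, not documented here): each line of
    # the objects/attributes files looks like "name,alias1,alias2"; only the first
    # comma-separated field is kept, lowercased and stripped. Illustrative line:
    # "traffic light,stoplight".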
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = OrderedDict()
with open(lowerCAmelCase__ ,'''rb''' ) as f:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
lowerCamelCase_ = ckp.pop(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCamelCase_ = torch.tensor(lowerCAmelCase__ )
else:
            assert isinstance(lowerCAmelCase__ ,torch.Tensor ), type(lowerCAmelCase__ )
lowerCamelCase_ = v
return r
class __lowerCamelCase :
a__: Union[str, Any] = {}
def __init__( self , UpperCAmelCase , UpperCAmelCase = "root" , UpperCAmelCase=0 ):
lowerCamelCase_ = name
lowerCamelCase_ = level
lowerCamelCase_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 )
lowerCamelCase_ = v
setattr(self , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = val
lowerCamelCase_ = val
lowerCamelCase_ = key.split('''.''' )
lowerCamelCase_ = len(UpperCAmelCase ) - 1
lowerCamelCase_ = self._pointer
if len(UpperCAmelCase ) > 1:
for i, l in enumerate(UpperCAmelCase ):
if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ):
setattr(getattr(self , UpperCAmelCase ) , '''.'''.join(levels[i:] ) , UpperCAmelCase )
if l == last_level:
lowerCamelCase_ = val
else:
lowerCamelCase_ = pointer[l]
def UpperCAmelCase__ ( self ):
return self._pointer
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
json.dump(UpperCAmelCase , UpperCAmelCase )
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase ):
with open(UpperCAmelCase ) as stream:
lowerCamelCase_ = load(UpperCAmelCase , Loader=UpperCAmelCase )
return data
def __str__( self ):
lowerCamelCase_ = ''' '''
if self._name != "root":
lowerCamelCase_ = f"{t * (self._level-1)}{self._name}:\n"
else:
lowerCamelCase_ = ''''''
lowerCamelCase_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n"
lowerCamelCase_ = level
return r[:-1]
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
return cls(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''cache_dir''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''force_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''resume_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''proxies''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''local_files_only''' , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
lowerCamelCase_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ):
lowerCamelCase_ = pretrained_model_name_or_path
else:
lowerCamelCase_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase )
try:
# Load from URL or cache if already cached
lowerCamelCase_ = cached_path(
UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowerCamelCase_ = Config.load_yaml(UpperCAmelCase )
except EnvironmentError:
lowerCamelCase_ = '''Can\'t load config for'''
raise EnvironmentError(UpperCAmelCase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(UpperCAmelCase ), kwargs
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = torch.load('''dump.pt''' ,map_location=in_tensor.device )
lowerCamelCase_ = in_tensor.numpy()
lowerCamelCase_ = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ), (
f"{sum([1 for x in np.isclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
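# NOTE on the compare helper above: it never returns normally. It raises an
# AssertionError carrying mismatch statistics when the tensors differ, and otherwise
# deliberately raises Exception("tensors are all good"), so it is only useful for
# halting execution while debugging a checkpoint conversion.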
# Hugging face functions below
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = urlparse(lowerCAmelCase__ )
return parsed.scheme in ("http", "https")
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=True ):
lowerCamelCase_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
lowerCamelCase_ = '''/''' not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=0 ,lowerCAmelCase__=None ,):
lowerCamelCase_ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + "; ".join('''{}/{}'''.format(lowerCAmelCase__ ,lowerCAmelCase__ ) for k, v in user_agent.items() )
elif isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + user_agent
lowerCamelCase_ = {'''user-agent''': ua}
if resume_size > 0:
lowerCamelCase_ = '''bytes=%d-''' % (resume_size,)
lowerCamelCase_ = requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,headers=lowerCAmelCase__ )
if response.status_code == 416: # Range not satisfiable
return
lowerCamelCase_ = response.headers.get('''Content-Length''' )
lowerCamelCase_ = resume_size + int(lowerCAmelCase__ ) if content_length is not None else None
lowerCamelCase_ = tqdm(
unit='''B''' ,unit_scale=lowerCAmelCase__ ,total=lowerCAmelCase__ ,initial=lowerCAmelCase__ ,desc='''Downloading''' ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCAmelCase__ ) )
temp_file.write(lowerCAmelCase__ )
progress.close()
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=10 ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
lowerCamelCase_ = None
if not local_files_only:
try:
lowerCamelCase_ = requests.head(lowerCAmelCase__ ,allow_redirects=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,timeout=lowerCAmelCase__ )
if response.status_code == 200:
lowerCamelCase_ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowerCamelCase_ = url_to_filename(lowerCAmelCase__ ,lowerCAmelCase__ )
# get cache path to put the file
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCAmelCase__ ):
return cache_path
else:
lowerCamelCase_ = [
file
for file in fnmatch.filter(os.listdir(lowerCAmelCase__ ) ,filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(lowerCAmelCase__ ) > 0:
return os.path.join(lowerCAmelCase__ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowerCamelCase_ = cache_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowerCamelCase_ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(lowerCAmelCase__ ,'''a+b''' ) as f:
yield f
lowerCamelCase_ = _resumable_file_manager
if os.path.exists(lowerCAmelCase__ ):
lowerCamelCase_ = os.stat(lowerCAmelCase__ ).st_size
else:
lowerCamelCase_ = 0
else:
lowerCamelCase_ = partial(tempfile.NamedTemporaryFile ,dir=lowerCAmelCase__ ,delete=lowerCAmelCase__ )
lowerCamelCase_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
            print(
                '''%s not found in cache or force_download set to True, downloading to %s''' % (lowerCAmelCase__ ,temp_file.name) )
http_get(
lowerCAmelCase__ ,lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_size=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,)
os.replace(temp_file.name ,lowerCAmelCase__ )
lowerCamelCase_ = {'''url''': url, '''etag''': etag}
lowerCamelCase_ = cache_path + '''.json'''
with open(lowerCAmelCase__ ,'''w''' ) as meta_file:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
return cache_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ):
lowerCamelCase_ = url.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
lowerCamelCase_ = url_hash.hexdigest()
if etag:
lowerCamelCase_ = etag.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
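# Naming scheme of the function above: cache filename = hexdigest(hash(url)), plus
# "." + hexdigest(hash(etag)) when an ETag is known; ".h5" is re-appended for
# TensorFlow weight files so the extension survives hashing.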
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if is_remote_url(lowerCAmelCase__ ):
# URL, so get it from the cache (downloading if necessary)
lowerCamelCase_ = get_from_cache(
lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,)
elif os.path.exists(lowerCAmelCase__ ):
# File, and it exists.
lowerCamelCase_ = url_or_filename
elif urlparse(lowerCAmelCase__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(lowerCAmelCase__ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowerCAmelCase__ ) )
if extract_compressed_file:
if not is_zipfile(lowerCAmelCase__ ) and not tarfile.is_tarfile(lowerCAmelCase__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowerCamelCase_ , lowerCamelCase_ = os.path.split(lowerCAmelCase__ )
lowerCamelCase_ = output_file.replace('''.''' ,'''-''' ) + '''-extracted'''
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ) and os.listdir(lowerCAmelCase__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowerCamelCase_ = output_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
shutil.rmtree(lowerCAmelCase__ ,ignore_errors=lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ )
if is_zipfile(lowerCAmelCase__ ):
with ZipFile(lowerCAmelCase__ ,'''r''' ) as zip_file:
zip_file.extractall(lowerCAmelCase__ )
zip_file.close()
elif tarfile.is_tarfile(lowerCAmelCase__ ):
lowerCamelCase_ = tarfile.open(lowerCAmelCase__ )
tar_file.extractall(lowerCAmelCase__ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowerCAmelCase__ ) )
return output_path_extracted
return output_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="," ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ) as f:
lowerCamelCase_ = eval(f.read() )
else:
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
try:
            lowerCamelCase_ = req.json()
except Exception:
lowerCamelCase_ = req.content.decode()
assert data is not None, "could not connect"
try:
lowerCamelCase_ = eval(lowerCAmelCase__ )
except Exception:
lowerCamelCase_ = data.split('''\n''' )
req.close()
return data
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
lowerCamelCase_ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCAmelCase__ )
with open(lowerCAmelCase__ ,'''rb''' ) as stream:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )
lowerCamelCase_ = weights.pop('''model''' )
lowerCamelCase_ = {}
for k, v in model.items():
lowerCamelCase_ = torch.from_numpy(lowerCAmelCase__ )
if "running_var" in k:
lowerCamelCase_ = torch.tensor([0] )
lowerCamelCase_ = k.replace('''running_var''' ,'''num_batches_tracked''' )
lowerCamelCase_ = zero
return new
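# A plausible reading of the running_var -> num_batches_tracked handling above:
# Detectron-style checkpoints store running_var but no num_batches_tracked, so a zero
# tensor is additionally inserted under the renamed key to let PyTorch BatchNorm
# layers load the state dict without missing-key errors.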
def lowercase ( ):
print(f"{os.path.abspath(os.path.join(lowerCAmelCase__ ,os.pardir ) )}/demo.ipynb" )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="RGB" ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
lowerCamelCase_ = cva.imread(lowerCAmelCase__ )
else:
lowerCamelCase_ = get_image_from_url(lowerCAmelCase__ )
assert img is not None, f"could not connect to: {im}"
lowerCamelCase_ = cva.cvtColor(lowerCAmelCase__ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowerCamelCase_ = img[:, :, ::-1]
return img
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=1 ):
return (images[i : i + batch] for i in range(0 ,len(lowerCAmelCase__ ) ,lowerCAmelCase__ ))
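# Illustrative behaviour of the batcher above: with images = [1, 2, 3, 4, 5] and
# batch = 2 it yields [1, 2], [3, 4], [5] (the final chunk may be shorter).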
| 29 | 1 |
"""simple docstring"""
from PIL import Image
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
def brightness(lowerCAmelCase__ ) -> float:
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError('''level must be between -255.0 (black) and 255.0 (white)''' )
    return img.point(brightness )
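# The transform above reduces to c + level (the 128s cancel), so level = 100 maps a
# pixel value of 50 to 150; Pillow's point() clips results back into the 8-bit range
# for "L"/"RGB" images.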
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
A_ = change_brightness(img, 100)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
| 29 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
A_ = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
a__: int = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the training data.'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the validation data.'} )
a__: Optional[str] = field(default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowerCamelCase_ = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCamelCase_ = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __lowerCamelCase :
a__: str = field(
default=lowerCAmelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
a__: str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowercase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCamelCase_ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCamelCase_ = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCamelCase_ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCamelCase_ = load_dataset('''csv''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCamelCase_ = load_dataset('''json''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCamelCase_ = raw_datasets['''train'''].features['''label'''].names
lowerCamelCase_ = len(lowerCAmelCase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# load tapex tokenizer
lowerCamelCase_ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,add_prefix_space=lowerCAmelCase__ ,)
lowerCamelCase_ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase_ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase_ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCamelCase_ = {'''Refused''': 0, '''Entailed''': 1}
lowerCamelCase_ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCamelCase_ = min(data_args.max_seq_length ,tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCAmelCase__ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCAmelCase__ ):
lowerCamelCase_ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCamelCase_ = pd.DataFrame.from_records(_table_content[1:] ,columns=_table_content[0] )
return _table_pd
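        # Worked example of the '#'-delimited TabFact encoding handled above (values
        # illustrative): "city#population\nparis#2.1m\nberlin#3.6m" becomes a DataFrame
        # with columns ["city", "population"] and two data rows, which is then passed
        # to the tokenizer together with the statement.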
lowerCamelCase_ = examples['''statement''']
lowerCamelCase_ = list(map(_convert_table_text_to_pandas ,examples['''table_text'''] ) )
lowerCamelCase_ = tokenizer(lowerCAmelCase__ ,lowerCAmelCase__ ,padding=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,truncation=lowerCAmelCase__ )
lowerCamelCase_ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCamelCase_ = raw_datasets.map(
lowerCAmelCase__ ,batched=lowerCAmelCase__ ,load_from_cache_file=not data_args.overwrite_cache ,desc='''Running tokenizer on dataset''' ,)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCamelCase_ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCamelCase_ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCAmelCase__ ) ) ,3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase__ ):
lowerCamelCase_ = p.predictions[0] if isinstance(p.predictions ,lowerCAmelCase__ ) else p.predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase_ = default_data_collator
elif training_args.fpaa:
lowerCamelCase_ = DataCollatorWithPadding(lowerCAmelCase__ ,pad_to_multiple_of=8 )
else:
lowerCamelCase_ = None
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=lowerCAmelCase__ ,args=lowerCAmelCase__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,data_collator=lowerCAmelCase__ ,)
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
)
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ = trainer.evaluate(eval_dataset=lowerCAmelCase__ )
lowerCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ )
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.log_metrics('''eval''' ,lowerCAmelCase__ )
trainer.save_metrics('''eval''' ,lowerCAmelCase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
lowerCamelCase_ = predict_dataset.remove_columns('''label''' )
lowerCamelCase_ = trainer.predict(lowerCAmelCase__ ,metric_key_prefix='''predict''' ).predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
lowerCamelCase_ = os.path.join(training_args.output_dir ,'''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase__ ,'''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowerCAmelCase__ ):
lowerCamelCase_ = label_list[item]
writer.write(f"{index}\t{item}\n" )
lowerCamelCase_ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 29 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
A_ = logging.get_logger(__name__)
class __lowerCamelCase ( lowerCAmelCase ):
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 29 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
lowerCamelCase_ = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
lowerCamelCase_ = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits
lowerCamelCase_ = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean()
lowerCamelCase_ = -(labels.shape[-1] * loss.item())
lowerCamelCase_ = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
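# For reference (summarizing the Flax T5 implementation, not shown here):
# shift_tokens_right shifts every token one position to the right, writes
# decoder_start_token_id into position 0, and replaces any -100 with pad_token_id,
# e.g. [[5, 6, 1]] with start id 0 becomes [[0, 5, 6]]. The score above turns the
# mean per-token cross-entropy back into a sequence-level log-likelihood by
# multiplying by the label length and negating.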
| 29 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
A_ = logging.get_logger(__name__)
A_ = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class __lowerCamelCase ( lowerCAmelCase ):
a__: Optional[int] = 'blip_2_vision_model'
def __init__( self , UpperCAmelCase=1408 , UpperCAmelCase=6144 , UpperCAmelCase=39 , UpperCAmelCase=16 , UpperCAmelCase=224 , UpperCAmelCase=14 , UpperCAmelCase="gelu" , UpperCAmelCase=0.0_0_0_0_1 , UpperCAmelCase=0.0 , UpperCAmelCase=1e-1_0 , UpperCAmelCase=True , **UpperCAmelCase , ):
super().__init__(**UpperCAmelCase )
lowerCamelCase_ = hidden_size
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = patch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = hidden_act
lowerCamelCase_ = qkv_bias
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
cls._set_token_in_kwargs(UpperCAmelCase )
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
lowerCamelCase_ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase )
class __lowerCamelCase ( lowerCAmelCase ):
a__: Optional[int] = 'blip_2_qformer'
def __init__( self , UpperCAmelCase=3_0522 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=0.0_2 , UpperCAmelCase=1e-1_2 , UpperCAmelCase=0 , UpperCAmelCase="absolute" , UpperCAmelCase=2 , UpperCAmelCase=1408 , **UpperCAmelCase , ):
super().__init__(pad_token_id=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = position_embedding_type
lowerCamelCase_ = cross_attention_frequency
lowerCamelCase_ = encoder_hidden_size
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
cls._set_token_in_kwargs(UpperCAmelCase )
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('''model_type''' ) == "blip-2":
lowerCamelCase_ = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase )
class __lowerCamelCase ( lowerCAmelCase ):
a__: Union[str, Any] = 'blip-2'
a__: str = True
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=32 , **UpperCAmelCase ):
super().__init__(**UpperCAmelCase )
if vision_config is None:
lowerCamelCase_ = {}
logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' )
if qformer_config is None:
lowerCamelCase_ = {}
logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )
if text_config is None:
lowerCamelCase_ = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
lowerCamelCase_ = BlipaVisionConfig(**UpperCAmelCase )
lowerCamelCase_ = BlipaQFormerConfig(**UpperCAmelCase )
lowerCamelCase_ = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
lowerCamelCase_ = CONFIG_MAPPING[text_model_type](**UpperCAmelCase )
lowerCamelCase_ = self.text_config.tie_word_embeddings
lowerCamelCase_ = self.text_config.is_encoder_decoder
lowerCamelCase_ = num_query_tokens
lowerCamelCase_ = self.vision_config.hidden_size
lowerCamelCase_ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowerCamelCase_ = 1.0
lowerCamelCase_ = 0.0_2
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = copy.deepcopy(self.__dict__ )
lowerCamelCase_ = self.vision_config.to_dict()
lowerCamelCase_ = self.qformer_config.to_dict()
lowerCamelCase_ = self.text_config.to_dict()
lowerCamelCase_ = self.__class__.model_type
return output
| 29 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = generate_pascal_triangle(lowerCAmelCase__ )
for row_idx in range(lowerCAmelCase__ ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=''' ''' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] ,end=''' ''' )
else:
print(triangle[row_idx][col_idx] ,end='''''' )
print()
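# Illustrative output of the printer above for an input of 3 (each row gets
# num_rows - row_idx - 1 leading spaces):
#   1
#  1 1
# 1 2 1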
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = []
for current_row_idx in range(lowerCAmelCase__ ):
lowerCamelCase_ = populate_current_row(lowerCAmelCase__ ,lowerCAmelCase__ )
triangle.append(lowerCAmelCase__ )
return triangle
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
lowerCamelCase_ , lowerCamelCase_ = 1, 1
for current_col_idx in range(1 ,lowerCAmelCase__ ):
calculate_current_element(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
return current_row
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,):
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx - 1]
lowerCamelCase_ = triangle[current_row_idx - 1][current_col_idx]
lowerCamelCase_ = above_to_left_elt + above_to_right_elt
def lowercase ( lowerCAmelCase__ ):
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'''The input value of \'num_rows\' should be greater than or equal to 0''' )
lowerCamelCase_ = [[1]]
for row_index in range(1 ,lowerCAmelCase__ ):
lowerCamelCase_ = [0] + result[-1] + [0]
lowerCamelCase_ = row_index + 1
# Calculate the number of distinct elements in a row
lowerCamelCase_ = sum(divmod(lowerCAmelCase__ ,2 ) )
lowerCamelCase_ = [
temp_row[i - 1] + temp_row[i] for i in range(1 ,distinct_elements + 1 )
]
lowerCamelCase_ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowerCamelCase_ = row_first_half + row_second_half
result.append(lowerCAmelCase__ )
return result
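# The optimized generator above exploits row symmetry: it computes only the first
# ceil(row_length / 2) elements of each row and mirrors them, e.g. the first half
# [1, 5, 10] of the row at index 5 becomes [1, 5, 10, 10, 5, 1].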
def lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ ) -> None:
lowerCamelCase_ = f"{func.__name__}({value})"
lowerCamelCase_ = timeit(f"__main__.{call}" ,setup='''import __main__''' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(f"{call:38} -- {timing:.4f} seconds" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCAmelCase__ ,lowerCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 29 | 1 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
A_ = re.compile(R"""\s+""")
def lowercase ( lowerCAmelCase__ ):
return {"hash": hashlib.mda(re.sub(lowerCAmelCase__ ,'''''' ,example['''content'''] ).encode('''utf-8''' ) ).hexdigest()}
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = [len(lowerCAmelCase__ ) for line in example['''content'''].splitlines()]
return {"line_mean": np.mean(lowerCAmelCase__ ), "line_max": max(lowerCAmelCase__ )}
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = np.mean([c.isalnum() for c in example['''content''']] )
return {"alpha_frac": alpha_frac}
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
if example["hash"] in uniques:
uniques.remove(example['''hash'''] )
return True
else:
return False
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=5 ):
lowerCamelCase_ = ['''auto-generated''', '''autogenerated''', '''automatically generated''']
lowerCamelCase_ = example['''content'''].splitlines()
for _, line in zip(range(lowerCAmelCase__ ) ,lowerCAmelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=5 ,lowerCAmelCase__=0.05 ):
lowerCamelCase_ = ['''unit tests''', '''test file''', '''configuration file''']
lowerCamelCase_ = example['''content'''].splitlines()
lowerCamelCase_ = 0
lowerCamelCase_ = 0
# first test
for _, line in zip(range(lowerCAmelCase__ ) ,lowerCAmelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
lowerCamelCase_ = example['''content'''].count('''\n''' )
lowerCamelCase_ = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('''config''' )
count_test += line.lower().count('''test''' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = ['''def ''', '''class ''', '''for ''', '''while ''']
lowerCamelCase_ = example['''content'''].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=4 ):
lowerCamelCase_ = example['''content'''].splitlines()
lowerCamelCase_ = 0
for line in lines:
counter += line.lower().count('''=''' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = tokenizer(example['''content'''] ,truncation=lowerCAmelCase__ )['''input_ids''']
lowerCamelCase_ = len(example['''content'''] ) / len(lowerCAmelCase__ )
return {"ratio": ratio}
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = {}
results.update(get_hash(lowerCAmelCase__ ) )
results.update(line_stats(lowerCAmelCase__ ) )
results.update(alpha_stats(lowerCAmelCase__ ) )
results.update(char_token_ratio(lowerCAmelCase__ ) )
results.update(is_autogenerated(lowerCAmelCase__ ) )
results.update(is_config_or_test(lowerCAmelCase__ ) )
results.update(has_no_keywords(lowerCAmelCase__ ) )
results.update(has_few_assignments(lowerCAmelCase__ ) )
return results
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
if not check_uniques(lowerCAmelCase__ ,lowerCAmelCase__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def lowercase ( lowerCAmelCase__ ):
with open(lowerCAmelCase__ ,'''rb''' ) as f_in:
with gzip.open(str(lowerCAmelCase__ ) + '''.gz''' ,'''wb''' ,compresslevel=6 ) as f_out:
shutil.copyfileobj(lowerCAmelCase__ ,lowerCAmelCase__ )
os.unlink(lowerCAmelCase__ )
# Settings
A_ = HfArgumentParser(PreprocessingArguments)
A_ = parser.parse_args()
if args.num_workers is None:
A_ = multiprocessing.cpu_count()
A_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
A_ = time.time()
A_ = load_dataset(args.dataset_name, split="""train""")
print(f"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
A_ = time.time()
A_ = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
A_ = set(ds.unique("""hash"""))
A_ = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
A_ = time.time()
A_ = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
A_ = time.time()
A_ , A_ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(f"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
A_ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / """duplicate_clusters.json""", """w""") as f:
json.dump(duplicate_clusters, f)
A_ = output_dir / """data"""
data_dir.mkdir(exist_ok=True)
A_ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
A_ = str(data_dir / f"file-{file_number+1:012}.json")
A_ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 29 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowerCamelCase_ = os.path.join(self.tmpdirname , UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase )
lowerCamelCase_ = AlignProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase )
def test_save_load_pretrained_additional_features( self ):
processor = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
processor = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , BertTokenizerFast )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , EfficientNetImageProcessor )
def test_image_processor( self ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
image_input = self.prepare_image_inputs()
input_image_proc = image_processor(image_input , return_tensors='''np''' )
input_processor = processor(images=image_input , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def test_tokenizer( self ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
input_str = '''lower newer'''
encoded_processor = processor(text=input_str )
encoded_tok = tokenizer(input_str , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def test_processor( self ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
input_str = '''lower newer'''
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(ValueError ):
processor()
def test_tokenizer_decode( self ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids )
decoded_tok = tokenizer.batch_decode(predicted_ids )
self.assertListEqual(decoded_tok , decoded_processor )
def test_model_input_names( self ):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
input_str = '''lower newer'''
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 29 | 1 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Union[str, Any] = RobertaTokenizer
a__: Any = RobertaTokenizerFast
a__: int = True
a__: List[str] = {'cls_token': '<s>'}
def UpperCAmelCase__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase_ = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowerCamelCase_ = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase_ = {'''unk_token''': '''<unk>'''}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase ) )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = '''lower newer'''
return input_text, output_text
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase_ = '''lower newer'''
lowerCamelCase_ = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = tokens + [tokenizer.unk_token]
lowerCamelCase_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=UpperCAmelCase ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=UpperCAmelCase ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class.from_pretrained('''roberta-base''' )
lowerCamelCase_ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(
'''sequence builders''' , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = '''Encode this sequence.'''
lowerCamelCase_ = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
# Testing spaces after special tokens
lowerCamelCase_ = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase )} ) # mask token has a left space
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase_ = '''Encode <mask> sequence'''
lowerCamelCase_ = '''Encode <mask>sequence'''
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase )
lowerCamelCase_ = encoded.index(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase )
lowerCamelCase_ = encoded.index(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = '''A, <mask> AllenNLP sentence.'''
lowerCamelCase_ = tokenizer_r.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
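# The next test checks that add_prefix_space / trim_offsets propagate into the fast tokenizer's serialized pre-tokenizer and post-processor state.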
def UpperCAmelCase__ ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase )
lowerCamelCase_ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCamelCase_ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , UpperCAmelCase )
self.assertEqual(post_processor_state['''add_prefix_space'''] , UpperCAmelCase )
self.assertEqual(post_processor_state['''trim_offsets'''] , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
text_of_1_token = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
text = f"{text_of_1_token} {text_of_1_token}"
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase ) + 1, len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase ) + 1, len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase ), len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase ), len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , )
lowerCamelCase_ = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCAmelCase ) + 1, 1 + len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCAmelCase ), 1 + len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCAmelCase ), 1 + len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , )
| 29 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
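# Derive a FocalNetConfig (depths, focal levels/windows, embed dim, label mappings) from the checkpoint name.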
def get_focalnet_config( model_name ):
depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
focal_levels = [3, 3, 3, 3]
focal_windows = [5, 5, 5, 5]
elif "fl4" in model_name:
focal_levels = [4, 4, 4, 4]
focal_windows = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
focal_windows = [3, 3, 3, 3]
if "lrf" in model_name:
focal_levels = [3, 3, 3, 3]
else:
focal_levels = [2, 2, 2, 2]
if "tiny" in model_name:
embed_dim = 96
elif "small" in model_name:
embed_dim = 96
elif "base" in model_name:
embed_dim = 128
elif "large" in model_name:
embed_dim = 192
elif "xlarge" in model_name:
embed_dim = 256
elif "huge" in model_name:
embed_dim = 352
# set label information
repo_id = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
filename = '''imagenet-22k-id2label.json'''
else:
filename = '''imagenet-1k-id2label.json'''
id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type='''dataset''' ) ,'''r''' ) )
id2label = {int(k ): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
config = FocalNetConfig(
embed_dim=embed_dim ,depths=depths ,focal_levels=focal_levels ,focal_windows=focal_windows ,use_conv_embed=use_conv_embed ,id2label=id2label ,label2id=label2id ,use_post_layernorm=use_post_layernorm ,use_layerscale=use_layerscale ,)
return config
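# Map an original FocalNet checkpoint key to the Transformers naming scheme.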
def rename_key( name ):
if "patch_embed.proj" in name:
name = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
name = name.replace('''patch_embed.norm''' ,'''embeddings.norm''' )
if "layers" in name:
name = '''encoder.''' + name
if "encoder.layers" in name:
name = name.replace('''encoder.layers''' ,'''encoder.stages''' )
if "downsample.proj" in name:
name = name.replace('''downsample.proj''' ,'''downsample.projection''' )
if "blocks" in name:
name = name.replace('''blocks''' ,'''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
name = name.replace('''modulation.f''' ,'''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
name = name.replace('''modulation.h''' ,'''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
name = name.replace('''modulation.proj''' ,'''modulation.projection_out''' )
if name == "norm.weight":
name = '''layernorm.weight'''
if name == "norm.bias":
name = '''layernorm.bias'''
if "head" in name:
name = name.replace('''head''' ,'''classifier''' )
else:
name = '''focalnet.''' + name
return name
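# Download the original checkpoint, rename its keys, load it into FocalNetForImageClassification and verify the outputs before saving or pushing.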
def convert_focalnet_checkpoint( model_name ,pytorch_dump_folder_path ,push_to_hub=False ):
# fmt: off
model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
checkpoint_url = model_name_to_url[model_name]
print('''Checkpoint URL: ''' ,checkpoint_url )
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url ,map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
val = state_dict.pop(key )
state_dict[rename_key(key )] = val
config = get_focalnet_config(model_name )
model = FocalNetForImageClassification(config )
model.eval()
# load state dict
model.load_state_dict(state_dict )
# verify conversion
url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
processor = BitImageProcessor(
do_resize=True ,size={'''shortest_edge''': 256} ,resample=PILImageResampling.BILINEAR ,do_center_crop=True ,crop_size=224 ,do_normalize=True ,image_mean=IMAGENET_DEFAULT_MEAN ,image_std=IMAGENET_DEFAULT_STD ,)
image = Image.open(requests.get(url ,stream=True ).raw )
inputs = processor(images=image ,return_tensors='''pt''' )
image_transforms = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ),
] )
original_pixel_values = image_transforms(image ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values ,original_pixel_values ,atol=1E-4 )
outputs = model(**inputs )
predicted_class_idx = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' ,model.config.id2label[predicted_class_idx] )
print('''First values of logits:''' ,outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowerCamelCase_ = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
lowerCamelCase_ = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
lowerCamelCase_ = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
lowerCamelCase_ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
lowerCamelCase_ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
lowerCamelCase_ = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] ,lowerCAmelCase__ ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 29 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
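# Weighted undirected graph; edges are stored as (smaller vertex, larger vertex) -> weight so direction never matters.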
EdgeT = tuple[int, int]
class Graph:
def __init__( self , vertices: set[int] , edges: Mapping[EdgeT, int] ):
self.vertices = vertices
self.edges = {
(min(edge ), max(edge )): weight for edge, weight in edges.items()
}
def add_edge( self , edge: EdgeT , weight: int ):
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
self.edges[(min(edge ), max(edge ))] = weight
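# Prim's algorithm: repeatedly add the cheapest edge with exactly one endpoint inside the growing spanning tree.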
def prims_algorithm( self ):
subgraph = Graph({min(self.vertices )} , {} )
min_edge: EdgeT
min_weight: int
edge: EdgeT
weight: int
while len(subgraph.vertices ) < len(self.vertices ):
min_weight = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
min_edge = edge
min_weight = weight
subgraph.add_edge(min_edge , min_weight )
return subgraph
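# Project Euler 107: the maximum saving is the total edge weight minus the weight of a minimum spanning tree.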
def lowercase ( lowerCAmelCase__ = "p107_network.txt" ):
lowerCamelCase_ = os.path.abspath(os.path.dirname(lowerCAmelCase__ ) )
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCamelCase_ = {}
lowerCamelCase_ = 42
lowerCamelCase_ = 42
lowerCamelCase_ = 42
with open(lowerCAmelCase__ ) as f:
lowerCamelCase_ = f.read().strip().split('''\n''' )
lowerCamelCase_ = [line.split(''',''' ) for line in data]
for edgea in range(1 ,len(lowerCAmelCase__ ) ):
for edgea in range(lowerCAmelCase__ ):
if adjaceny_matrix[edgea][edgea] != "-":
lowerCamelCase_ = int(adjaceny_matrix[edgea][edgea] )
lowerCamelCase_ = Graph(set(range(len(lowerCAmelCase__ ) ) ) ,lowerCAmelCase__ )
lowerCamelCase_ = graph.prims_algorithm()
lowerCamelCase_ = sum(graph.edges.values() )
lowerCamelCase_ = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
| 29 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Tuple = RoCBertTokenizer
a__: int = None
a__: Optional[Any] = False
a__: Optional[int] = True
a__: Tuple = filter_non_english
def UpperCAmelCase__ ( self ):
super().setUp()
vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
word_shape = {}
word_pronunciation = {}
for i, value in enumerate(vocab_tokens ):
word_shape[value] = i
word_pronunciation[value] = i
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
self.word_shape_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
self.word_pronunciation_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(word_shape , word_shape_writer , ensure_ascii=False )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(word_pronunciation , word_pronunciation_writer , ensure_ascii=False )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCAmelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self ):
vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
vocab = {}
for i, token in enumerate(vocab_tokens ):
vocab[token] = i
tokenizer = RoCBertWordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self ):
tokenizer = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
rust_tokenizer = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , '''do_lower_case''' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''的''', '''人''', '''有''']
lowerCamelCase_ = ''''''.join(UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase )
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.encode('''你好''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''你好,你是谁'''
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.prepare_for_model(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 29 | 1 |
"""simple docstring"""
A_ = """
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and uncomment the one below.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
A_ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
A_ = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 29 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
A_ = datasets.logging.get_logger(__name__)
A_ = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
A_ = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit            This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities       These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
A_ = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos( key_lines ,sys_lines ,NP_only=False ,remove_nested=False ,keep_singletons=True ,min_span=False ,doc="dummy_doc" ):
key_doc_lines = {doc: key_lines}
sys_doc_lines = {doc: sys_lines}
doc_coref_infos = {}
key_singletons_num = 0
sys_singletons_num = 0
key_nested_coref_num = 0
sys_nested_coref_num = 0
key_removed_nested_clusters = 0
sys_removed_nested_clusters = 0
key_clusters , singletons_num = reader.get_doc_mentions(doc ,key_doc_lines[doc] ,keep_singletons )
key_singletons_num += singletons_num
if NP_only or min_span:
key_clusters = reader.set_annotated_parse_trees(key_clusters ,key_doc_lines[doc] ,NP_only ,min_span )
sys_clusters , singletons_num = reader.get_doc_mentions(doc ,sys_doc_lines[doc] ,keep_singletons )
sys_singletons_num += singletons_num
if NP_only or min_span:
sys_clusters = reader.set_annotated_parse_trees(sys_clusters ,key_doc_lines[doc] ,NP_only ,min_span )
if remove_nested:
nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(key_clusters ,keep_singletons )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
nested_mentions , removed_clusters = reader.remove_nested_coref_mentions(sys_clusters ,keep_singletons )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters ,key_clusters )
key_mention_sys_cluster = reader.get_mention_assignments(key_clusters ,sys_clusters )
doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
'''Number of removed nested coreferring mentions in the key '''
f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
'''Number of resulting singleton clusters in the key '''
f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
'''files, respectively''' )
return doc_coref_infos
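# Run every requested metric and aggregate the CoNLL score as the mean F1 of MUC, B-cubed and CEAFe.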
def evaluate( key_lines ,sys_lines ,metrics ,NP_only ,remove_nested ,keep_singletons ,min_span ):
doc_coref_infos = get_coref_infos(key_lines ,sys_lines ,NP_only ,remove_nested ,keep_singletons ,min_span )
output_scores = {}
conll = 0
conll_subparts_num = 0
for name, metric in metrics:
recall , precision , fa = evaluator.evaluate_documents(doc_coref_infos ,metric ,beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": fa} )
logger.info(
name.ljust(10 ) ,f"Recall: {recall * 100:.2f}" ,f" Precision: {precision * 100:.2f}" ,f" F1: {fa * 100:.2f}" ,)
if conll_subparts_num == 3:
conll = (conll / 3) * 100
logger.info(f"CoNLL score: {conll:.2f}" )
output_scores.update({'''conll_score''': conll} )
return output_scores
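# Scan the key lines for a gold parse in column 6; min_span evaluation requires one.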
def check_gold_parse_annotation( key_lines ):
has_gold_parse = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
parse_col = line.split()[5]
if not parse_col == "-":
has_gold_parse = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Sequence(datasets.Value('''string''' ) ),
} ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[
'''https://github.com/ns-moosavi/coval''',
'''https://www.aclweb.org/anthology/P16-1060''',
'''http://www.conll.cemantix.org/2012/data.html''',
] , )
def _compute( self , predictions , references , keep_singletons=True , NP_only=False , min_span=False , remove_nested=False ):
metrics = [
('''mentions''', evaluator.mentions),
('''muc''', evaluator.muc),
('''bcub''', evaluator.b_cubed),
('''ceafe''', evaluator.ceafe),
('''lea''', evaluator.lea),
]
if min_span:
has_gold_parse = util.check_gold_parse_annotation(references )
if not has_gold_parse:
raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
score = evaluate(
key_lines=references , sys_lines=predictions , metrics=metrics , NP_only=NP_only , remove_nested=remove_nested , keep_singletons=keep_singletons , min_span=min_span , )
return score
| 29 | 1 |
"""simple docstring"""
def solution( limit = 1_000_000 ):
primes = set(range(3 ,limit + 1 ,2 ) )
primes.add(2 )
for p in range(3 ,limit + 1 ,2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p ,limit + 1 ,p ) ) )
phi = [float(n ) for n in range(limit + 1 )]
for p in primes:
for n in range(p ,limit + 1 ,p ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f"{solution() = }")
| 29 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
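# Round-trips a GenerationConfig through save_pretrained/from_pretrained and checks explicit values as well as untouched defaults.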
@parameterized.expand([(None,), ('''foo.json''',)] )
def test_save_load_config( self , config_name ):
config = GenerationConfig(
do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(tmp_dir , config_name=config_name )
loaded_config = GenerationConfig.from_pretrained(tmp_dir , config_name=config_name )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , True )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , None )
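# from_model_config should pick up generation-relevant fields (e.g. eos_token_id) from a model config.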
def UpperCAmelCase__ ( self ):
model_config = AutoConfig.from_pretrained('''gpt2''' )
generation_config_from_model = GenerationConfig.from_model_config(model_config )
default_generation_config = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(generation_config_from_model , default_generation_config )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self ):
generation_config = GenerationConfig()
update_kwargs = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
update_kwargs_copy = copy.deepcopy(update_kwargs )
unused_kwargs = generation_config.update(**update_kwargs )
# update_kwargs was not modified (no side effects)
self.assertEqual(update_kwargs , update_kwargs_copy )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(unused_kwargs , {'''foo''': '''bar'''} )
def UpperCAmelCase__ ( self ):
generation_config = GenerationConfig()
generation_config.foo = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(tmp_dir )
new_config = GenerationConfig.from_pretrained(tmp_dir )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
generation_config = GenerationConfig.from_model_config(new_config )
assert not hasattr(generation_config , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self ):
default_config = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , False )
self.assertEqual(default_config.num_beams , 1 )
config = GenerationConfig(
do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , True )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(tmp_dir )
loaded_config = GenerationConfig.from_pretrained(tmp_dir , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , True )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ ( cls ):
cls._token = TOKEN
HfFolder.save_token(TOKEN )
@classmethod
def UpperCAmelCase__ ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''test-generation-config''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = GenerationConfig(
do_sample=UpperCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
lowerCamelCase_ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
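# A minimal, self-contained round trip of the save/load behaviour exercised
# above (attribute values here are illustrative, not taken from the tests):
import tempfile
from transformers import GenerationConfig

generation_config = GenerationConfig(do_sample=True, temperature=0.7)
with tempfile.TemporaryDirectory() as tmp_dir:
    generation_config.save_pretrained(tmp_dir)  # writes generation_config.json
    reloaded = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)  # kwargs override saved values
assert reloaded.temperature == 1.0 and reloaded.do_sample is True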
| 29 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class __lowerCamelCase ( lowerCAmelCase ):
a__: Any = 'roberta-prelayernorm'
def __init__( self , UpperCAmelCase=5_0265 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=2 , UpperCAmelCase=0.0_2 , UpperCAmelCase=1e-1_2 , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=2 , UpperCAmelCase="absolute" , UpperCAmelCase=True , UpperCAmelCase=None , **UpperCAmelCase , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = position_embedding_type
lowerCamelCase_ = use_cache
lowerCamelCase_ = classifier_dropout
class __lowerCamelCase ( lowerCAmelCase ):
@property
def UpperCAmelCase__ ( self ):
if self.task == "multiple-choice":
lowerCamelCase_ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase_ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 29 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase :
a__: List[str]
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='Translation' , init=lowerCAmelCase , repr=lowerCAmelCase )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCAmelCase__ ( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __lowerCamelCase :
a__: Optional[List] = None
a__: Optional[int] = None
a__: Optional[str] = None
# Automatically constructed
a__: ClassVar[str] = "dict"
a__: ClassVar[Any] = None
a__: str = field(default='TranslationVariableLanguages' , init=lowerCAmelCase , repr=lowerCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = sorted(set(self.languages ) ) if self.languages else None
lowerCamelCase_ = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def UpperCAmelCase__ ( self , UpperCAmelCase ):
lowerCamelCase_ = set(self.languages )
if self.languages and set(UpperCAmelCase ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(UpperCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(UpperCAmelCase )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowerCamelCase_ = []
for lang, text in translation_dict.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowerCamelCase_ , lowerCamelCase_ = zip(*sorted(UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def UpperCAmelCase__ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
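# Standalone sketch of the flattening performed by `TranslationVariableLanguages`
# above: a dict with one or more texts per language becomes two aligned,
# language-sorted lists (pure Python, no datasets dependency needed):
translation_dict = {"de": "Hallo", "fr": ["Bonjour", "Salut"]}
translation_tuples = []
for lang, text in translation_dict.items():
    if isinstance(text, str):
        translation_tuples.append((lang, text))
    else:
        translation_tuples.extend((lang, el) for el in text)
languages, translations = zip(*sorted(translation_tuples))
assert languages == ("de", "fr", "fr")
assert translations == ("Hallo", "Bonjour", "Salut")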
| 29 | 1 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A_ = concatenate_datasets
A_ = DownloadConfig
A_ = DownloadManager
A_ = DownloadMode
A_ = DownloadConfig
A_ = DownloadMode
A_ = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 29 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
A_ = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
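# The `_LazyModule` above defers the heavy torch/flax imports until an attribute
# is first accessed. A minimal stdlib-only sketch of the same idea (the
# `LazyLoader` name is ours, not part of transformers):
import importlib

class LazyLoader:
    """Defers importing `module_name` until the first attribute access."""

    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name):
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)

np = LazyLoader("numpy")  # numpy is imported only once np.<something> is touched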
| 29 | 1 |
"""simple docstring"""
def lowercase ( input_str ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 |
"""simple docstring"""
import math
def prime_sieve ( n ):
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 ,n ,2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution ( limit = 999_966_663_333 ):
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 100
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 29 | 1 |
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
A_ = logging.getLogger(__name__)
class __lowerCamelCase :
def __init__( self ):
lowerCamelCase_ = False
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
if not self.initialized:
lowerCamelCase_ = RagRetriever(
UpperCAmelCase , question_encoder_tokenizer=UpperCAmelCase , generator_tokenizer=UpperCAmelCase , index=UpperCAmelCase , init_retrieval=UpperCAmelCase , )
lowerCamelCase_ = True
def UpperCAmelCase__ ( self ):
self.retriever.index.init_index()
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ = self.retriever._main_retrieve(UpperCAmelCase , UpperCAmelCase )
return doc_ids, retrieved_doc_embeds
class __lowerCamelCase ( lowerCAmelCase ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ):
if index is not None and index.is_initialized() and len(UpperCAmelCase ) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )
super().__init__(
UpperCAmelCase , question_encoder_tokenizer=UpperCAmelCase , generator_tokenizer=UpperCAmelCase , index=UpperCAmelCase , init_retrieval=UpperCAmelCase , )
lowerCamelCase_ = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
for worker in self.retrieval_workers
] )
def UpperCAmelCase__ ( self ):
logger.info('''initializing retrieval''' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
lowerCamelCase_ = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
lowerCamelCase_ , lowerCamelCase_ = ray.get(random_worker.retrieve.remote(UpperCAmelCase , UpperCAmelCase ) )
else:
lowerCamelCase_ , lowerCamelCase_ = self._main_retrieve(UpperCAmelCase , UpperCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ):
return super(UpperCAmelCase , cls ).get_tokenizers(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''config''' , UpperCAmelCase ) or RagConfig.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = RagTokenizer.from_pretrained(UpperCAmelCase , config=UpperCAmelCase )
lowerCamelCase_ = rag_tokenizer.question_encoder
lowerCamelCase_ = rag_tokenizer.generator
if indexed_dataset is not None:
lowerCamelCase_ = '''custom'''
lowerCamelCase_ = CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase )
else:
lowerCamelCase_ = cls._build_index(UpperCAmelCase )
return cls(
UpperCAmelCase , question_encoder_tokenizer=UpperCAmelCase , generator_tokenizer=UpperCAmelCase , retrieval_workers=UpperCAmelCase , index=UpperCAmelCase , )
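# Minimal sketch of the Ray actor round trip used by the retriever above, with
# a toy worker instead of a real index (all names in this sketch are invented
# for illustration):
import ray

@ray.remote
class ToyRetrievalWorker:
    def retrieve(self, query):
        return f"docs for {query!r}"

ray.init()
workers = [ToyRetrievalWorker.remote() for _ in range(2)]
print(ray.get([worker.retrieve.remote("some question") for worker in workers]))
ray.shutdown()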
| 29 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
A_ = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close ( source ,target ):
    return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = _TestCommandArgs(dataset=lowerCAmelCase__ ,all_configs=lowerCAmelCase__ ,save_infos=lowerCAmelCase__ )
lowerCamelCase_ = TestCommand(*lowerCAmelCase__ )
test_command.run()
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,'''README.md''' )
assert os.path.exists(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict.from_directory(lowerCAmelCase__ )
lowerCamelCase_ = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) ,splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] ,download_size=3_940_680 ,dataset_size=2_589_981 ,)
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowerCamelCase_ , lowerCamelCase_ = getattr(dataset_infos['''default'''] ,lowerCAmelCase__ ), getattr(expected_dataset_infos['''default'''] ,lowerCAmelCase__ )
if key == "num_bytes":
assert is_apercent_close(lowerCAmelCase__ ,lowerCAmelCase__ )
elif key == "splits":
assert list(lowerCAmelCase__ ) == list(lowerCAmelCase__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes )
else:
            assert result == expected
| 29 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
A_ = logging.get_logger(__name__)
class __lowerCamelCase ( lowerCAmelCase ):
a__: Union[str, Any] = ['input_features', 'attention_mask']
def __init__( self , UpperCAmelCase=80 , UpperCAmelCase=1_6000 , UpperCAmelCase=80 , UpperCAmelCase=0.0 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , **UpperCAmelCase , ):
super().__init__(feature_size=UpperCAmelCase , sampling_rate=UpperCAmelCase , padding_value=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = num_mel_bins
lowerCamelCase_ = do_ceptral_normalize
lowerCamelCase_ = normalize_means
lowerCamelCase_ = normalize_vars
lowerCamelCase_ = True
def UpperCAmelCase__ ( self , UpperCAmelCase , ):
lowerCamelCase_ = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
lowerCamelCase_ = torch.from_numpy(UpperCAmelCase ).unsqueeze(0 )
lowerCamelCase_ = ta_kaldi.fbank(UpperCAmelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True , UpperCAmelCase = True , UpperCAmelCase = 0.0 , ):
# make sure we normalize float32 arrays
if normalize_means:
lowerCamelCase_ = x[:input_length].mean(axis=0 )
lowerCamelCase_ = np.subtract(UpperCAmelCase , UpperCAmelCase )
if normalize_vars:
lowerCamelCase_ = x[:input_length].std(axis=0 )
lowerCamelCase_ = np.divide(UpperCAmelCase , UpperCAmelCase )
if input_length < x.shape[0]:
lowerCamelCase_ = padding_value
# make sure array is in float32
lowerCamelCase_ = x.astype(np.floataa )
return x
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = None ):
lowerCamelCase_ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(UpperCAmelCase , UpperCAmelCase , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(UpperCAmelCase , UpperCAmelCase )
]
def __call__( self , UpperCAmelCase , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowerCamelCase_ = isinstance(UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
lowerCamelCase_ = is_batched_numpy or (
isinstance(UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase_ = [np.asarray(UpperCAmelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase , np.ndarray ):
lowerCamelCase_ = np.asarray(UpperCAmelCase , dtype=np.floataa )
elif isinstance(UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase_ = [raw_speech]
# extract fbank features
lowerCamelCase_ = [self._extract_fbank_features(UpperCAmelCase ) for waveform in raw_speech]
# convert into correct format for padding
lowerCamelCase_ = BatchFeature({'''input_features''': features} )
lowerCamelCase_ = self.pad(
UpperCAmelCase , padding=UpperCAmelCase , max_length=UpperCAmelCase , truncation=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , **UpperCAmelCase , )
# make sure list is in array format
lowerCamelCase_ = padded_inputs.get('''input_features''' )
if isinstance(input_features[0] , UpperCAmelCase ):
lowerCamelCase_ = [np.asarray(UpperCAmelCase , dtype=np.floataa ) for feature in input_features]
lowerCamelCase_ = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
lowerCamelCase_ = [np.asarray(UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
lowerCamelCase_ = (
np.array(UpperCAmelCase , dtype=np.intaa )
if self._get_padding_strategies(UpperCAmelCase , max_length=UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowerCamelCase_ = self.normalize(
padded_inputs['''input_features'''] , attention_mask=UpperCAmelCase )
if return_tensors is not None:
lowerCamelCase_ = padded_inputs.convert_to_tensors(UpperCAmelCase )
return padded_inputs
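# Standalone numpy sketch of the utterance-level CMVN implemented in
# `utterance_cmvn` above: mean/variance statistics are computed over the
# non-padded frames only, then applied to the whole array.
import numpy as np

def cmvn_sketch(x, input_length):
    mean = x[:input_length].mean(axis=0)
    x = np.subtract(x, mean)
    std = x[:input_length].std(axis=0)
    return np.divide(x, std)

fbank = np.random.rand(100, 80).astype(np.float32)  # (frames, mel bins)
normalized = cmvn_sketch(fbank, input_length=90)    # stats from the first 90 frames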
| 29 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
A_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
A_ = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
A_ = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
def UpperCAmelCase__ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
def UpperCAmelCase__ ( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=False ):
if concatenate_texts:
return compute_measures(UpperCAmelCase , UpperCAmelCase )["wer"]
else:
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = compute_measures(UpperCAmelCase , UpperCAmelCase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
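# Worked example of the WER formula quoted in the description above,
# independent of jiwer: "this is the prediction" vs. reference
# "this is the reference" has S=1, D=0, I=0 over N=4 reference words.
S, D, I, N = 1, 0, 0, 4
assert (S + D + I) / N == 0.25  # one substituted word out of four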
| 29 | 1 |
"""simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation ( graph ,v ,visited_forward ,visited_backward ,cst_fwd ,cst_bwd ,queue ,parent ,shortest_distance ,):
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt ,np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij ( source ,destination ,graph_forward ,graph_backward ):
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward ,v_fwd ,visited_forward ,visited_backward ,cst_fwd ,cst_bwd ,queue_forward ,parent_forward ,shortest_distance ,)
        shortest_distance = pass_and_relaxation(
            graph_backward ,v_bwd ,visited_backward ,visited_forward ,cst_bwd ,cst_fwd ,queue_backward ,parent_backward ,shortest_distance ,)
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
A_ = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
A_ = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
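# Usage sketch for `bidirectional_dij` on the forward/backward graphs defined
# above (re-declared locally because the module stores both under `A_`):
graph_forward = {"B": [["C", 1]], "C": [["D", 1]], "D": [["F", 1]], "E": [["B", 1], ["G", 2]], "F": [], "G": [["F", 1]]}
graph_backward = {"B": [["E", 1]], "C": [["B", 1]], "D": [["C", 1]], "F": [["D", 1], ["G", 1]], "E": [[None, np.inf]], "G": [["E", 2]]}
assert bidirectional_dij("E", "F", graph_forward, graph_backward) == 3  # E -> G -> F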
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 |
"""simple docstring"""
def lowercase ( input_str ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 1 |
"""simple docstring"""
import torch
from transformers import AutoModel
class __lowerCamelCase ( torch.nn.Module ):
def __init__( self , UpperCAmelCase="sayef/fsner-bert-base-uncased" ):
super(UpperCAmelCase , self ).__init__()
lowerCamelCase_ = AutoModel.from_pretrained(UpperCAmelCase , return_dict=UpperCAmelCase )
lowerCamelCase_ = torch.nn.CosineSimilarity(3 , 1e-0_8 )
lowerCamelCase_ = torch.nn.Softmax(dim=1 )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
return self.bert(**UpperCAmelCase ).last_hidden_state
def UpperCAmelCase__ ( self , UpperCAmelCase ):
        return token_embeddings.sum(2 , keepdim=True )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1 ):
return self.softmax(T * self.cos(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = W_supports['''sizes'''].tolist()
lowerCamelCase_ = W_supports['''start_token_id'''].item()
lowerCamelCase_ = W_supports['''end_token_id'''].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowerCamelCase_ = self.BERT(**UpperCAmelCase )
lowerCamelCase_ = self.BERT(**UpperCAmelCase )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = W_supports['''input_ids'''] == start_token_id
lowerCamelCase_ = W_supports['''input_ids'''] == end_token_id
for i, size in enumerate(UpperCAmelCase ):
if i == 0:
lowerCamelCase_ = 0
else:
lowerCamelCase_ = support_sizes[i - 1]
lowerCamelCase_ = S[s : s + size][start_token_masks[s : s + size]]
lowerCamelCase_ = S[s : s + size][end_token_masks[s : s + size]]
lowerCamelCase_ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowerCamelCase_ = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowerCamelCase_ = torch.vstack((p_starts, p_start) )
lowerCamelCase_ = torch.vstack((p_ends, p_end) )
else:
lowerCamelCase_ = p_start
lowerCamelCase_ = p_end
return p_starts, p_ends
| 29 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 20
lowerCamelCase_ = self._get_uniform_logits(batch_size=2 , length=UpperCAmelCase )
# tweak scores to not be uniform anymore
lowerCamelCase_ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase_ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase_ = jax.nn.softmax(UpperCAmelCase , axis=-1 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_sharper(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
lowerCamelCase_ = jax.nn.softmax(temp_dist_warper_smoother(UpperCAmelCase , scores.copy() , cur_len=UpperCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create ramp distribution
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase_ = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, length) ).copy()
lowerCamelCase_ = top_k_warp_safety_check(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = None
lowerCamelCase_ = 10
lowerCamelCase_ = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase_ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.1_5, 0.3, 0.3, 0.2_5]] ) )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase_ = np.exp(top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase_ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.2_5]] )
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase_ = np.broadcast_to(np.arange(UpperCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase_ = ramp_logits[1] * 1_0_0.0
# make sure at least 2 tokens are kept
lowerCamelCase_ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
# check that min length is applied at length 5
lowerCamelCase_ = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase_ = 5
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = 15
lowerCamelCase_ = min_dist_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase_ = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase_ = 1
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 20
lowerCamelCase_ = 4
lowerCamelCase_ = 0
lowerCamelCase_ = 5
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase_ = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase_ = 4
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase_ = 3
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = logits_processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
self.assertFalse(jnp.isinf(UpperCAmelCase ).any() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# with processor list
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = 4
lowerCamelCase_ = 10
lowerCamelCase_ = 15
lowerCamelCase_ = 2
lowerCamelCase_ = 1
lowerCamelCase_ = 15
# dummy input_ids and scores
lowerCamelCase_ = ids_tensor((batch_size, sequence_length) , UpperCAmelCase )
lowerCamelCase_ = input_ids.copy()
lowerCamelCase_ = self._get_uniform_logits(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = scores.copy()
# instantiate all dist processors
lowerCamelCase_ = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase_ = FlaxTopKLogitsWarper(3 )
lowerCamelCase_ = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase_ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCAmelCase )
lowerCamelCase_ = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCAmelCase , eos_token_id=UpperCAmelCase )
lowerCamelCase_ = 10
# no processor list
def run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = temp_dist_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_k_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = top_p_warp(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = min_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = bos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
lowerCamelCase_ = eos_dist_proc(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
# with processor list
def run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase_ = processor(UpperCAmelCase , UpperCAmelCase , cur_len=UpperCAmelCase )
return scores
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jax.jit(UpperCAmelCase )
lowerCamelCase_ = jitted_run_no_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = jitted_run_processor_list(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
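# Minimal jax sketch of what the temperature warper tested above does:
# dividing logits by T < 1 sharpens the softmax, T > 1 flattens it.
import jax
import jax.numpy as jnp

logits = jnp.array([[1.0, 2.0, 3.0]])
base = jax.nn.softmax(logits, axis=-1)
sharp = jax.nn.softmax(logits / 0.5, axis=-1)   # T = 0.5
smooth = jax.nn.softmax(logits / 1.3, axis=-1)  # T = 1.3
assert sharp[0].max() > base[0].max()
assert smooth[0].max() < base[0].max()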
| 29 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key ( orig_key ):
    if "model" in orig_key:
        orig_key = orig_key.replace('''model.''' ,'''''' )
    if "norm1" in orig_key:
        orig_key = orig_key.replace('''norm1''' ,'''attention.output.LayerNorm''' )
    if "norm2" in orig_key:
        orig_key = orig_key.replace('''norm2''' ,'''output.LayerNorm''' )
    if "norm" in orig_key:
        orig_key = orig_key.replace('''norm''' ,'''LayerNorm''' )
    if "transformer" in orig_key:
        layer_num = orig_key.split('''.''' )[0].split('''_''' )[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}" ,f"encoder.layer.{layer_num}" )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace('''mha.attn''' ,'''attention.self''' )
    if "mha" in orig_key:
        orig_key = orig_key.replace('''mha''' ,'''attention''' )
    if "W_q" in orig_key:
        orig_key = orig_key.replace('''W_q''' ,'''self.query''' )
    if "W_k" in orig_key:
        orig_key = orig_key.replace('''W_k''' ,'''self.key''' )
    if "W_v" in orig_key:
        orig_key = orig_key.replace('''W_v''' ,'''self.value''' )
    if "ff1" in orig_key:
        orig_key = orig_key.replace('''ff1''' ,'''intermediate.dense''' )
    if "ff2" in orig_key:
        orig_key = orig_key.replace('''ff2''' ,'''output.dense''' )
    if "ff" in orig_key:
        orig_key = orig_key.replace('''ff''' ,'''output.dense''' )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace('''mlm.mlm_class''' ,'''cls.predictions.decoder''' )
    if "mlm" in orig_key:
        orig_key = orig_key.replace('''mlm''' ,'''cls.predictions.transform''' )
    if "cls" not in orig_key:
        orig_key = '''yoso.''' + orig_key
    return orig_key
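# Spot check of the key mapping above on a representative original key (the
# key itself is hypothetical, built to exercise the transformer/mha/W_q rules):
assert (
    rename_key("model.transformer_0.mha.W_q.weight")
    == "yoso.encoder.layer.0.attention.self.query.weight"
)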
def convert_checkpoint_helper ( max_position_embeddings ,orig_state_dict ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val
    orig_state_dict['''cls.predictions.bias'''] = orig_state_dict['''cls.predictions.decoder.bias''']
    orig_state_dict['''yoso.embeddings.position_ids'''] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
    return orig_state_dict
def convert_yoso_checkpoint ( checkpoint_path ,yoso_config_file ,pytorch_dump_path ):
    orig_state_dict = torch.load(checkpoint_path ,map_location='''cpu''' )['''model_state_dict''']
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings ,orig_state_dict )
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}" )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for YOSO model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A_ = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 29 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration ( func ):
    def wrapper(*args ,**kwargs ):
        starttime = timeit.default_timer()
        func(*args ,**kwargs )
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples ( features ,num_examples=100 ,seq_shapes=None ):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v ,_ArrayXD ):
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v ,datasets.Value ):
                if v.dtype == "string":
                    data = '''The small grey turtle was surprisingly fast when challenged.'''
                else:
                    data = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
            elif isinstance(v ,datasets.Sequence ):
                while isinstance(v ,datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def generate_example_dataset ( dataset_path ,features ,num_examples=100 ,seq_shapes=None ):
    dummy_data = generate_examples(features ,num_examples=num_examples ,seq_shapes=seq_shapes )
    with ArrowWriter(features=features ,path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
    dataset = datasets.Dataset.from_file(filename=dataset_path ,info=datasets.DatasetInfo(features=features ) )
return dataset
| 29 | 1 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
A_ = 300 # TEMPERATURE (unit = K)
def builtin_voltage ( donor_conc ,acceptor_conc ,intrinsic_conc ,):
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
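# Worked usage sketch (illustrative silicon-like values; concentrations in
# cm^-3, with n_i ~ 1e10 a common textbook figure at T = 300 K):
v_bi = builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10)
assert 0.82 < v_bi < 0.85  # kT/q * ln(1e34 / 1e20) ~ 0.833 V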
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
A_ = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def lowercase ( ):
lowerCamelCase_ = Github(os.environ['''GITHUB_TOKEN'''] )
lowerCamelCase_ = g.get_repo('''huggingface/accelerate''' )
lowerCamelCase_ = repo.get_issues(state='''open''' )
for issue in open_issues:
        lowerCamelCase_ = sorted([comment for comment in issue.get_comments()] ,key=lambda i : i.created_at ,reverse=True )
lowerCamelCase_ = comments[0] if len(lowerCAmelCase__ ) > 0 else None
lowerCamelCase_ = dt.utcnow()
lowerCamelCase_ = (current_time - issue.updated_at).days
lowerCamelCase_ = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 29 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A_ = logging.get_logger(__name__)
# TODO: upload to AWS
A_ = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class __lowerCamelCase ( lowerCAmelCase ):
a__: Optional[int] = 'retribert'
def __init__( self , UpperCAmelCase=3_0522 , UpperCAmelCase=768 , UpperCAmelCase=8 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=2 , UpperCAmelCase=0.0_2 , UpperCAmelCase=1e-1_2 , UpperCAmelCase=True , UpperCAmelCase=128 , UpperCAmelCase=0 , **UpperCAmelCase , ):
super().__init__(pad_token_id=UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = share_encoders
lowerCamelCase_ = projection_dim
| 29 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_ ( state_dict ):
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k ,None )
def make_linear_from_emb ( emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size ,emb_size ,bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk ( checkpoint_path ,hf_config_path="facebook/mbart-large-en-ro" ,finetuned=False ,mbart_50=False ):
    state_dict = torch.load(checkpoint_path ,map_location='''cpu''' )['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path ,vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = '''relu'''
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
A_ = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
| 29 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path ,bert_config_file ,pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model ,config ,tf_checkpoint_path )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() ,pytorch_dump_path )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
A_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
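# Example invocation (the script filename and all paths are placeholders):
# python convert_bert_original_tf_checkpoint_to_pytorch.py \
#   --tf_checkpoint_path ./bert_model.ckpt \
#   --bert_config_file ./bert_config.json \
#   --pytorch_dump_path ./pytorch_model.bin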
| 29 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 29 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ):
a__: Tuple = CanineTokenizer
a__: Tuple = False
def UpperCAmelCase__ ( self ):
super().setUp()
lowerCamelCase_ = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ ( self ):
return CanineTokenizer.from_pretrained('''google/canine-s''' )
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
lowerCamelCase_ = self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase )
lowerCamelCase_ = 1024
return tokenizer
@require_torch
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.canine_tokenizer
lowerCamelCase_ = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
# fmt: off
lowerCamelCase_ = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = list(batch.input_ids.numpy()[0] )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
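    # CANINE needs no vocabulary file: every character is encoded as its Unicode code
    # point, and special tokens live in the private use area (CLS=0xE000=57344,
    # SEP=0xE001=57345, matching the expected ids above). Roughly, for illustration:
    #   ids = [0xE000] + [ord(c) for c in text] + [0xE001]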
@require_torch
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.canine_tokenizer
        lowerCamelCase_ = ['''Once there was a man.''', '''He wrote a test in HuggingFace Transformers.''']
lowerCamelCase_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('''input_ids''' , UpperCAmelCase )
self.assertIn('''attention_mask''' , UpperCAmelCase )
self.assertIn('''token_type_ids''' , UpperCAmelCase )
@require_torch
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.canine_tokenizer
lowerCamelCase_ = [
            '''What\'s the weather?''',
'''It\'s about 25 degrees.''',
]
lowerCamelCase_ = tokenizer(
text_target=UpperCAmelCase , max_length=32 , padding='''max_length''' , truncation=UpperCAmelCase , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def UpperCAmelCase__ ( self ):
# safety check on max_len default value so we are sure the test works
lowerCamelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCamelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCAmelCase )
lowerCamelCase_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
shutil.rmtree(UpperCAmelCase )
lowerCamelCase_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = ''' He is very happy, UNwant\u00E9d,running'''
lowerCamelCase_ = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowerCamelCase_ = chr(0Xe_007 )
additional_special_tokens.append(UpperCAmelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
tokenizer.save_pretrained(UpperCAmelCase )
lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCAmelCase )
lowerCamelCase_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertIn(UpperCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ , lowerCamelCase_ = self.get_clean_sequence(UpperCAmelCase )
# a special token for Canine can be defined as follows:
lowerCamelCase_ = 0Xe_005
lowerCamelCase_ = chr(UpperCAmelCase )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(len(UpperCAmelCase ) , 1 )
lowerCamelCase_ = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , input_encoded + special_token_id )
lowerCamelCase_ = tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = chr(0Xe_005 )
lowerCamelCase_ = chr(0Xe_006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=UpperCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
self.assertEqual(len(UpperCAmelCase ) , 1 )
self.assertEqual(len(UpperCAmelCase ) , 1 )
self.assertEqual(token_a[0] , UpperCAmelCase )
self.assertEqual(token_a[0] , UpperCAmelCase )
@require_tokenizers
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
lowerCamelCase_ = 0Xe_006
lowerCamelCase_ = chr(UpperCAmelCase )
lowerCamelCase_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(UpperCAmelCase )
tokenizer.from_pretrained(UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCamelCase_ = json.load(UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCamelCase_ = json.load(UpperCAmelCase )
# a special token for Canine can be defined as follows:
lowerCamelCase_ = 0Xe_006
lowerCamelCase_ = chr(UpperCAmelCase )
lowerCamelCase_ = [new_token_a]
lowerCamelCase_ = [new_token_a]
with open(os.path.join(UpperCAmelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
with open(os.path.join(UpperCAmelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCAmelCase , UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCamelCase_ = tokenizer_class.from_pretrained(UpperCAmelCase , extra_ids=0 )
self.assertIn(UpperCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
lowerCamelCase_ = 0Xe_007
lowerCamelCase_ = chr(UpperCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCamelCase_ = [AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase )]
lowerCamelCase_ = tokenizer_class.from_pretrained(
UpperCAmelCase , additional_special_tokens=UpperCAmelCase , extra_ids=0 )
self.assertIn(UpperCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''hello world'''
if self.space_between_special_tokens:
lowerCamelCase_ = '''[CLS] hello world [SEP]'''
else:
lowerCamelCase_ = input
lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.decode(UpperCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(UpperCAmelCase , [output, output.lower()] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
lowerCamelCase_ = '''a'''
lowerCamelCase_ = ord(UpperCAmelCase )
for attr in attributes_list:
setattr(UpperCAmelCase , attr + '''_id''' , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , attr + '''_id''' ) , UpperCAmelCase )
setattr(UpperCAmelCase , attr + '''_id''' , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(getattr(UpperCAmelCase , attr + '''_id''' ) , UpperCAmelCase )
setattr(UpperCAmelCase , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens_ids''' ) , [] )
lowerCamelCase_ = 0Xe_006
lowerCamelCase_ = chr(UpperCAmelCase )
setattr(UpperCAmelCase , '''additional_special_tokens_ids''' , [additional_special_token_id] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens''' ) , [additional_special_token] )
self.assertListEqual(getattr(UpperCAmelCase , '''additional_special_tokens_ids''' ) , [additional_special_token_id] )
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
def UpperCAmelCase__ ( self ):
pass
| 29 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( lowerCAmelCase ):
a__: Any = (DDPMScheduler,)
def UpperCAmelCase__ ( self , **UpperCAmelCase ):
lowerCamelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase )
return config
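    # For reference, a "linear" schedule under this config corresponds to (an
    # illustrative sketch, assumed to mirror the diffusers implementation):
    #   betas = torch.linspace(beta_start, beta_end, num_train_timesteps)
    #   alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)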
def UpperCAmelCase__ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
self.check_over_configs(thresholding=UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , sample_max_value=UpperCAmelCase , )
def UpperCAmelCase__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = len(UpperCAmelCase )
lowerCamelCase_ = self.dummy_model()
lowerCamelCase_ = self.dummy_sample_deter
lowerCamelCase_ = torch.manual_seed(0 )
for t in reversed(range(UpperCAmelCase ) ):
# 1. predict noise residual
lowerCamelCase_ = model(UpperCAmelCase , UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , generator=UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase_ = pred_prev_sample
lowerCamelCase_ = torch.sum(torch.abs(UpperCAmelCase ) )
lowerCamelCase_ = torch.mean(torch.abs(UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase )
lowerCamelCase_ = scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase ):
if i == len(UpperCAmelCase ) - 1:
lowerCamelCase_ = -1
else:
lowerCamelCase_ = timesteps[i + 1]
lowerCamelCase_ = scheduler.previous_timestep(UpperCAmelCase )
lowerCamelCase_ = prev_t.item()
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [100, 87, 50, 1, 0]
lowerCamelCase_ = len(UpperCAmelCase )
with self.assertRaises(UpperCAmelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase , timesteps=UpperCAmelCase )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.scheduler_classes[0]
lowerCamelCase_ = self.get_scheduler_config()
lowerCamelCase_ = scheduler_class(**UpperCAmelCase )
lowerCamelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            UpperCAmelCase , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase )
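    # Usage sketch for the custom-timestep API exercised above (values illustrative):
    #   scheduler = DDPMScheduler(num_train_timesteps=1000)
    #   scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # must be strictly descending
    #   # passing both num_inference_steps= and timesteps= raises ValueError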
| 29 | 1 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowercase ( lowerCAmelCase__ ):
def wrapper(*lowerCAmelCase__ ,**lowerCAmelCase__ ):
lowerCamelCase_ = timeit.default_timer()
lowerCamelCase_ = func(*lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCamelCase_ = timeit.default_timer() - starttime
return delta
lowerCamelCase_ = func.__name__
return wrapper
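# Usage sketch (hypothetical benchmarked function): the decorator above replaces the
# wrapped function's return value with the elapsed wall-clock time, so benchmarked
# calls are made purely for timing:
#   @lowercase
#   def write_batch():
#       ...
#   seconds = write_batch()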
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=100 ,lowerCAmelCase__=None ):
lowerCamelCase_ = []
lowerCamelCase_ = seq_shapes or {}
for i in range(lowerCAmelCase__ ):
lowerCamelCase_ = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(lowerCAmelCase__ ,_ArrayXD ):
lowerCamelCase_ = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(lowerCAmelCase__ ,datasets.Value ):
if v.dtype == "string":
lowerCamelCase_ = '''The small grey turtle was surprisingly fast when challenged.'''
else:
lowerCamelCase_ = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
elif isinstance(lowerCAmelCase__ ,datasets.Sequence ):
while isinstance(lowerCAmelCase__ ,datasets.Sequence ):
lowerCamelCase_ = v.feature
lowerCamelCase_ = seq_shapes[k]
lowerCamelCase_ = np.random.rand(*lowerCAmelCase__ ).astype(v.dtype )
lowerCamelCase_ = data
dummy_data.append((i, example) )
return dummy_data
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=100 ,lowerCAmelCase__=None ):
lowerCamelCase_ = generate_examples(lowerCAmelCase__ ,num_examples=lowerCAmelCase__ ,seq_shapes=lowerCAmelCase__ )
with ArrowWriter(features=lowerCAmelCase__ ,path=lowerCAmelCase__ ) as writer:
for key, record in dummy_data:
lowerCamelCase_ = features.encode_example(lowerCAmelCase__ )
writer.write(lowerCAmelCase__ )
lowerCamelCase_ , lowerCamelCase_ = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." )
lowerCamelCase_ = datasets.Dataset.from_file(filename=lowerCAmelCase__ ,info=datasets.DatasetInfo(features=lowerCAmelCase__ ) )
return dataset
| 29 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCamelCase ( lowerCAmelCase ):
a__: bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to use SortishSampler or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
a__: Optional[Union[str, Path, GenerationConfig]] = field(
default=lowerCAmelCase , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = super().to_dict()
for k, v in d.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = v.to_dict()
return d
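    # Usage sketch (this dataclass is transformers' Seq2SeqTrainingArguments; values
    # illustrative). Nested GenerationConfig fields are flattened via their own to_dict():
    #   args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True,
    #                                   generation_max_length=128, generation_num_beams=4)
    #   args.to_dict()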
| 29 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __lowerCamelCase ( lowerCAmelCase ):
a__: Optional[int] = 'decision_transformer'
a__: str = ['past_key_values']
a__: Dict = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , UpperCAmelCase=17 , UpperCAmelCase=4 , UpperCAmelCase=128 , UpperCAmelCase=4096 , UpperCAmelCase=True , UpperCAmelCase=1 , UpperCAmelCase=1024 , UpperCAmelCase=3 , UpperCAmelCase=1 , UpperCAmelCase=None , UpperCAmelCase="relu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=1e-5 , UpperCAmelCase=0.0_2 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=5_0256 , UpperCAmelCase=5_0256 , UpperCAmelCase=False , UpperCAmelCase=False , **UpperCAmelCase , ):
lowerCamelCase_ = state_dim
lowerCamelCase_ = act_dim
lowerCamelCase_ = hidden_size
lowerCamelCase_ = max_ep_len
lowerCamelCase_ = action_tanh
lowerCamelCase_ = vocab_size
lowerCamelCase_ = n_positions
lowerCamelCase_ = n_layer
lowerCamelCase_ = n_head
lowerCamelCase_ = n_inner
lowerCamelCase_ = activation_function
lowerCamelCase_ = resid_pdrop
lowerCamelCase_ = embd_pdrop
lowerCamelCase_ = attn_pdrop
lowerCamelCase_ = layer_norm_epsilon
lowerCamelCase_ = initializer_range
lowerCamelCase_ = scale_attn_weights
lowerCamelCase_ = use_cache
lowerCamelCase_ = scale_attn_by_inverse_layer_idx
lowerCamelCase_ = reorder_and_upcast_attn
lowerCamelCase_ = bos_token_id
lowerCamelCase_ = eos_token_id
super().__init__(bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
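    # Usage sketch (this is transformers' DecisionTransformerConfig; values illustrative):
    #   config = DecisionTransformerConfig(state_dim=17, act_dim=4, max_ep_len=4096)
    # state_dim and act_dim should match the target environment's observation/action sizes.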
| 29 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
A_ = True
except ImportError:
A_ = False
try:
from torch.hub import _get_torch_home
A_ = _get_torch_home()
except ImportError:
A_ = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
A_ = os.path.join(torch_cache_home, """transformers""")
A_ = """https://cdn.huggingface.co"""
A_ = """https://s3.amazonaws.com/models.huggingface.co/bert"""
A_ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
A_ = os.path.join(PATH, """config.yaml""")
A_ = os.path.join(PATH, """attributes.txt""")
A_ = os.path.join(PATH, """objects.txt""")
A_ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
A_ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
A_ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
A_ = """pytorch_model.bin"""
A_ = """config.yaml"""
def lowercase ( lowerCAmelCase__=OBJECTS ,lowerCAmelCase__=ATTRIBUTES ):
lowerCamelCase_ = []
with open(lowerCAmelCase__ ) as f:
for object in f.readlines():
vg_classes.append(object.split(''',''' )[0].lower().strip() )
lowerCamelCase_ = []
with open(lowerCAmelCase__ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(''',''' )[0].lower().strip() )
return vg_classes, vg_attrs
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = OrderedDict()
with open(lowerCAmelCase__ ,'''rb''' ) as f:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
lowerCamelCase_ = ckp.pop(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCamelCase_ = torch.tensor(lowerCAmelCase__ )
else:
            assert isinstance(lowerCAmelCase__ ,torch.Tensor ), type(lowerCAmelCase__ )
lowerCamelCase_ = v
return r
class __lowerCamelCase :
a__: Union[str, Any] = {}
def __init__( self , UpperCAmelCase , UpperCAmelCase = "root" , UpperCAmelCase=0 ):
lowerCamelCase_ = name
lowerCamelCase_ = level
lowerCamelCase_ = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
lowerCamelCase_ = copy.deepcopy(UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = Config(UpperCAmelCase , name=UpperCAmelCase , level=level + 1 )
lowerCamelCase_ = v
setattr(self , UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = d
def __repr__( self ):
return str(list((self._pointer.keys()) ) )
def __setattr__( self , UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase_ = val
lowerCamelCase_ = val
lowerCamelCase_ = key.split('''.''' )
lowerCamelCase_ = len(UpperCAmelCase ) - 1
lowerCamelCase_ = self._pointer
if len(UpperCAmelCase ) > 1:
for i, l in enumerate(UpperCAmelCase ):
if hasattr(self , UpperCAmelCase ) and isinstance(getattr(self , UpperCAmelCase ) , UpperCAmelCase ):
setattr(getattr(self , UpperCAmelCase ) , '''.'''.join(levels[i:] ) , UpperCAmelCase )
if l == last_level:
lowerCamelCase_ = val
else:
lowerCamelCase_ = pointer[l]
def UpperCAmelCase__ ( self ):
return self._pointer
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
dump(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase ):
with open(f"{file_name}" , '''w''' ) as stream:
json.dump(UpperCAmelCase , UpperCAmelCase )
@staticmethod
def UpperCAmelCase__ ( UpperCAmelCase ):
with open(UpperCAmelCase ) as stream:
lowerCamelCase_ = load(UpperCAmelCase , Loader=UpperCAmelCase )
return data
def __str__( self ):
lowerCamelCase_ = ''' '''
if self._name != "root":
lowerCamelCase_ = f"{t * (self._level-1)}{self._name}:\n"
else:
lowerCamelCase_ = ''''''
lowerCamelCase_ = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(UpperCAmelCase , UpperCAmelCase ):
r += f"{t * (self._level)}{v}\n"
self._level += 1
else:
r += f"{t * (self._level)}{k}: {v} ({type(UpperCAmelCase ).__name__})\n"
lowerCamelCase_ = level
return r[:-1]
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
return cls(UpperCAmelCase )
@classmethod
def UpperCAmelCase__ ( cls , UpperCAmelCase , **UpperCAmelCase ):
lowerCamelCase_ = kwargs.pop('''cache_dir''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''force_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''resume_download''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''proxies''' , UpperCAmelCase )
lowerCamelCase_ = kwargs.pop('''local_files_only''' , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
lowerCamelCase_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
elif os.path.isfile(UpperCAmelCase ) or is_remote_url(UpperCAmelCase ):
lowerCamelCase_ = pretrained_model_name_or_path
else:
lowerCamelCase_ = hf_bucket_url(UpperCAmelCase , filename=UpperCAmelCase , use_cdn=UpperCAmelCase )
try:
# Load from URL or cache if already cached
lowerCamelCase_ = cached_path(
UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , local_files_only=UpperCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
lowerCamelCase_ = Config.load_yaml(UpperCAmelCase )
except EnvironmentError:
lowerCamelCase_ = '''Can\'t load config for'''
raise EnvironmentError(UpperCAmelCase )
if resolved_config_file == config_file:
print('''loading configuration file from path''' )
else:
print('''loading configuration file cache''' )
return Config.load_yaml(UpperCAmelCase ), kwargs
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = torch.load('''dump.pt''' ,map_location=in_tensor.device )
lowerCamelCase_ = in_tensor.numpy()
lowerCamelCase_ = out_tensor.numpy()[0]
print(na.shape ,na[0, 0, :5] )
print(na.shape ,na[0, 0, :5] )
assert np.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ), (
f"{sum([1 for x in np.isclose(lowerCAmelCase__ ,lowerCAmelCase__ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %"
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = urlparse(lowerCAmelCase__ )
return parsed.scheme in ("http", "https")
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=True ):
lowerCamelCase_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
lowerCamelCase_ = '''/''' not in model_id
if legacy_format:
return f"{endpoint}/{model_id}-{filename}"
else:
return f"{endpoint}/{model_id}/{filename}"
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=0 ,lowerCAmelCase__=None ,):
lowerCamelCase_ = '''python/{}'''.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + "; ".join('''{}/{}'''.format(lowerCAmelCase__ ,lowerCAmelCase__ ) for k, v in user_agent.items() )
elif isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
ua += "; " + user_agent
lowerCamelCase_ = {'''user-agent''': ua}
if resume_size > 0:
lowerCamelCase_ = '''bytes=%d-''' % (resume_size,)
lowerCamelCase_ = requests.get(lowerCAmelCase__ ,stream=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,headers=lowerCAmelCase__ )
if response.status_code == 416: # Range not satisfiable
return
lowerCamelCase_ = response.headers.get('''Content-Length''' )
lowerCamelCase_ = resume_size + int(lowerCAmelCase__ ) if content_length is not None else None
lowerCamelCase_ = tqdm(
unit='''B''' ,unit_scale=lowerCAmelCase__ ,total=lowerCAmelCase__ ,initial=lowerCAmelCase__ ,desc='''Downloading''' ,)
for chunk in response.iter_content(chunk_size=1_024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCAmelCase__ ) )
temp_file.write(lowerCAmelCase__ )
progress.close()
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=10 ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ ,exist_ok=lowerCAmelCase__ )
lowerCamelCase_ = None
if not local_files_only:
try:
lowerCamelCase_ = requests.head(lowerCAmelCase__ ,allow_redirects=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,timeout=lowerCAmelCase__ )
if response.status_code == 200:
lowerCamelCase_ = response.headers.get('''ETag''' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
lowerCamelCase_ = url_to_filename(lowerCAmelCase__ ,lowerCAmelCase__ )
# get cache path to put the file
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCAmelCase__ ):
return cache_path
else:
lowerCamelCase_ = [
file
for file in fnmatch.filter(os.listdir(lowerCAmelCase__ ) ,filename + '''.*''' )
if not file.endswith('''.json''' ) and not file.endswith('''.lock''' )
]
if len(lowerCAmelCase__ ) > 0:
return os.path.join(lowerCAmelCase__ ,matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'''Cannot find the requested files in the cached path and outgoing traffic has been'''
''' disabled. To enable model look-ups and downloads online, set \'local_files_only\''''
''' to False.''' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lowerCamelCase_ = cache_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCAmelCase__ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
lowerCamelCase_ = cache_path + '''.incomplete'''
@contextmanager
def _resumable_file_manager():
with open(lowerCAmelCase__ ,'''a+b''' ) as f:
yield f
lowerCamelCase_ = _resumable_file_manager
if os.path.exists(lowerCAmelCase__ ):
lowerCamelCase_ = os.stat(lowerCAmelCase__ ).st_size
else:
lowerCamelCase_ = 0
else:
lowerCamelCase_ = partial(tempfile.NamedTemporaryFile ,dir=lowerCAmelCase__ ,delete=lowerCAmelCase__ )
lowerCamelCase_ = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'''%s not found in cache or force_download set to True, downloading to %s''' ,lowerCAmelCase__ ,temp_file.name ,)
http_get(
lowerCAmelCase__ ,lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_size=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,)
os.replace(temp_file.name ,lowerCAmelCase__ )
lowerCamelCase_ = {'''url''': url, '''etag''': etag}
lowerCamelCase_ = cache_path + '''.json'''
with open(lowerCAmelCase__ ,'''w''' ) as meta_file:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
return cache_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ):
lowerCamelCase_ = url.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
lowerCamelCase_ = url_hash.hexdigest()
if etag:
lowerCamelCase_ = etag.encode('''utf-8''' )
lowerCamelCase_ = shaaaa(lowerCAmelCase__ )
filename += "." + etag_hash.hexdigest()
if url.endswith('''.h5''' ):
filename += ".h5"
return filename
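# Illustrative: cache entries are named by hashing the URL, with the ETag hash appended
# when one is known, so a changed remote file gets a fresh cache slot:
#   "<hexdigest(url)>.<hexdigest(etag)>" (plus a ".h5" suffix for TF weight files)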
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=None ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,lowerCAmelCase__=False ,):
if cache_dir is None:
lowerCamelCase_ = TRANSFORMERS_CACHE
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCamelCase_ = str(lowerCAmelCase__ )
if is_remote_url(lowerCAmelCase__ ):
# URL, so get it from the cache (downloading if necessary)
lowerCamelCase_ = get_from_cache(
lowerCAmelCase__ ,cache_dir=lowerCAmelCase__ ,force_download=lowerCAmelCase__ ,proxies=lowerCAmelCase__ ,resume_download=lowerCAmelCase__ ,user_agent=lowerCAmelCase__ ,local_files_only=lowerCAmelCase__ ,)
elif os.path.exists(lowerCAmelCase__ ):
# File, and it exists.
lowerCamelCase_ = url_or_filename
elif urlparse(lowerCAmelCase__ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('''file {} not found'''.format(lowerCAmelCase__ ) )
else:
# Something unknown
raise ValueError('''unable to parse {} as a URL or as a local path'''.format(lowerCAmelCase__ ) )
if extract_compressed_file:
if not is_zipfile(lowerCAmelCase__ ) and not tarfile.is_tarfile(lowerCAmelCase__ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
lowerCamelCase_ , lowerCamelCase_ = os.path.split(lowerCAmelCase__ )
lowerCamelCase_ = output_file.replace('''.''' ,'''-''' ) + '''-extracted'''
lowerCamelCase_ = os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ) and os.listdir(lowerCAmelCase__ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lowerCamelCase_ = output_path + '''.lock'''
with FileLock(lowerCAmelCase__ ):
shutil.rmtree(lowerCAmelCase__ ,ignore_errors=lowerCAmelCase__ )
os.makedirs(lowerCAmelCase__ )
if is_zipfile(lowerCAmelCase__ ):
with ZipFile(lowerCAmelCase__ ,'''r''' ) as zip_file:
zip_file.extractall(lowerCAmelCase__ )
zip_file.close()
elif tarfile.is_tarfile(lowerCAmelCase__ ):
lowerCamelCase_ = tarfile.open(lowerCAmelCase__ )
tar_file.extractall(lowerCAmelCase__ )
tar_file.close()
else:
raise EnvironmentError('''Archive format of {} could not be identified'''.format(lowerCAmelCase__ ) )
return output_path_extracted
return output_path
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="," ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
with open(lowerCAmelCase__ ) as f:
lowerCamelCase_ = eval(f.read() )
else:
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
try:
            lowerCamelCase_ = req.json()
except Exception:
lowerCamelCase_ = req.content.decode()
assert data is not None, "could not connect"
try:
lowerCamelCase_ = eval(lowerCAmelCase__ )
except Exception:
lowerCamelCase_ = data.split('''\n''' )
req.close()
return data
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = requests.get(lowerCAmelCase__ )
lowerCamelCase_ = np.array(Image.open(BytesIO(response.content ) ) )
return img
def lowercase ( lowerCAmelCase__ ):
lowerCamelCase_ = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCAmelCase__ )
with open(lowerCAmelCase__ ,'''rb''' ) as stream:
lowerCamelCase_ = pkl.load(lowerCAmelCase__ )
lowerCamelCase_ = weights.pop('''model''' )
lowerCamelCase_ = {}
for k, v in model.items():
lowerCamelCase_ = torch.from_numpy(lowerCAmelCase__ )
if "running_var" in k:
lowerCamelCase_ = torch.tensor([0] )
lowerCamelCase_ = k.replace('''running_var''' ,'''num_batches_tracked''' )
lowerCamelCase_ = zero
return new
def lowercase ( ):
print(f"{os.path.abspath(os.path.join(lowerCAmelCase__ ,os.pardir ) )}/demo.ipynb" )
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__="RGB" ):
assert isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
if os.path.isfile(lowerCAmelCase__ ):
lowerCamelCase_ = cva.imread(lowerCAmelCase__ )
else:
lowerCamelCase_ = get_image_from_url(lowerCAmelCase__ )
assert img is not None, f"could not connect to: {im}"
lowerCamelCase_ = cva.cvtColor(lowerCAmelCase__ ,cva.COLOR_BGR2RGB )
if input_format == "RGB":
lowerCamelCase_ = img[:, :, ::-1]
return img
def lowercase ( lowerCAmelCase__ ,lowerCAmelCase__=1 ):
return (images[i : i + batch] for i in range(0 ,len(lowerCAmelCase__ ) ,lowerCAmelCase__ ))
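# Illustrative: the generator above yields successive slices of length `batch`, e.g.
# with batch=2 over ["a.jpg", "b.jpg", "c.jpg"] it yields ["a.jpg", "b.jpg"] then ["c.jpg"].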
| 29 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
A_ = logging.get_logger(__name__)
class __lowerCamelCase ( lowerCAmelCase ):
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ):
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 29 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
A_ = logging.getLogger(__name__)
@dataclass
class __lowerCamelCase :
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a__: Optional[str] = field(
default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , )
a__: int = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
a__: Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the training data.'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the validation data.'} )
a__: Optional[str] = field(default=lowerCAmelCase , metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self ):
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' )
else:
lowerCamelCase_ = self.train_file.split('''.''' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCamelCase_ = self.validation_file.split('''.''' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __lowerCamelCase :
a__: str = field(
default=lowerCAmelCase , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a__: Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
a__: str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
a__: bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def lowercase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
lowerCamelCase_ = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCamelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCamelCase_ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCamelCase_ = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCamelCase_ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}" )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCamelCase_ = load_dataset('''csv''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCamelCase_ = load_dataset('''json''' ,data_files=lowerCAmelCase__ ,cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCamelCase_ = raw_datasets['''train'''].features['''label'''].names
lowerCamelCase_ = len(lowerCAmelCase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# load tapex tokenizer
lowerCamelCase_ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,add_prefix_space=lowerCAmelCase__ ,)
lowerCamelCase_ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=lowerCAmelCase__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Padding strategy
if data_args.pad_to_max_length:
lowerCamelCase_ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCamelCase_ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCamelCase_ = {'''Refused''': 0, '''Entailed''': 1}
lowerCamelCase_ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
lowerCamelCase_ = min(data_args.max_seq_length ,tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCAmelCase__ ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCAmelCase__ ):
lowerCamelCase_ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCamelCase_ = pd.DataFrame.from_records(_table_content[1:] ,columns=_table_content[0] )
return _table_pd
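        # Illustrative: TabFact serializes tables with "#" between cells and newlines
        # between rows, e.g. "name#age\nalice#30\nbob#25" becomes a two-row DataFrame
        # with columns ["name", "age"] via the helper above.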
lowerCamelCase_ = examples['''statement''']
lowerCamelCase_ = list(map(_convert_table_text_to_pandas ,examples['''table_text'''] ) )
lowerCamelCase_ = tokenizer(lowerCAmelCase__ ,lowerCAmelCase__ ,padding=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,truncation=lowerCAmelCase__ )
lowerCamelCase_ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCamelCase_ = raw_datasets.map(
lowerCAmelCase__ ,batched=lowerCAmelCase__ ,load_from_cache_file=not data_args.overwrite_cache ,desc='''Running tokenizer on dataset''' ,)
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCamelCase_ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCamelCase_ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCAmelCase__ ) ) ,3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase__ ):
lowerCamelCase_ = p.predictions[0] if isinstance(p.predictions ,lowerCAmelCase__ ) else p.predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCamelCase_ = default_data_collator
elif training_args.fpaa:
lowerCamelCase_ = DataCollatorWithPadding(lowerCAmelCase__ ,pad_to_multiple_of=8 )
else:
lowerCamelCase_ = None
# Initialize our Trainer
lowerCamelCase_ = Trainer(
model=lowerCAmelCase__ ,args=lowerCAmelCase__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,compute_metrics=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,data_collator=lowerCAmelCase__ ,)
# Training
if training_args.do_train:
lowerCamelCase_ = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ = last_checkpoint
lowerCamelCase_ = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
)
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_metrics('''train''' ,lowerCAmelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ = trainer.evaluate(eval_dataset=lowerCAmelCase__ )
lowerCamelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ )
lowerCamelCase_ = min(lowerCAmelCase__ ,len(lowerCAmelCase__ ) )
trainer.log_metrics('''eval''' ,lowerCAmelCase__ )
trainer.save_metrics('''eval''' ,lowerCAmelCase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowerCamelCase_ = predict_dataset.remove_columns('''label''' )
lowerCamelCase_ = trainer.predict(lowerCAmelCase__ ,metric_key_prefix='''predict''' ).predictions
lowerCamelCase_ = np.argmax(lowerCAmelCase__ ,axis=1 )
lowerCamelCase_ = os.path.join(training_args.output_dir ,'''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowerCAmelCase__ ,'''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowerCAmelCase__ ):
lowerCamelCase_ = label_list[item]
writer.write(f"{index}\t{item}\n" )
lowerCamelCase_ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
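# Usage sketch (script name and values are illustrative; the flags follow the
# HfArgumentParser dataclasses defined above):
#   python run_tabfact_with_tapex.py \
#       --model_name_or_path microsoft/tapex-base \
#       --dataset_name tab_fact --do_train --do_eval \
#       --output_dir ./tapex-tabfact --per_device_train_batch_size 8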
| 29 | 1 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
A_ = """\
@inproceedings{lin-2004-rouge,
title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",
author = \"Lin, Chin-Yew\",
booktitle = \"Text Summarization Branches Out\",
month = jul,
year = \"2004\",
address = \"Barcelona, Spain\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W04-1013\",
pages = \"74--81\",
}
"""
A_ = """\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against one or more human-produced reference summaries or translations.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metric is a wrapper around the Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
"""
A_ = """
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,
`\"rougeL\"`: Longest common subsequence based scoring.
`\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric('rouge')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
>>> print(results[\"rouge1\"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results[\"rouge1\"].mid.fmeasure)
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
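# Minimal sketch (assuming only that the `rouge_score` package is installed) of
# what the wrapper above delegates to - scoring one prediction/reference pair:
#
#     from rouge_score import rouge_scorer
#     scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
#     print(scorer.score("hello there", "hello there")["rouge1"].fmeasure)  # 1.0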
| 29 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
@slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
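        # `loss` above is the *mean* per-token cross entropy, so multiplying by
        # the target length recovers the summed token negative log-likelihood;
        # the negation turns `mtf_score` into a log-likelihood-style score (the
        # "mtf" naming presumably follows the Mesh-TensorFlow convention).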
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 29 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def default_matrix_multiplication(a, b):
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a, matrix_b):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a, matrix_b):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a):
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix):
    return len(matrix), len(matrix[0])


def print_matrix(matrix):
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a, matrix_b):
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
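# For reference, the seven products above follow the classical Strassen
# identities, with quadrants A..D of the left operand and E..H of the right:
#   t1 = A(F-H), t2 = (A+B)H, t3 = (C+D)E, t4 = D(G-E),
#   t5 = (A+D)(E+H), t6 = (B-D)(G+H), t7 = (A-C)(E+F)
# and the result quadrants are top_left = t5+t4-t2+t6, top_right = t1+t2,
# bot_left = t3+t4, bot_right = t1+t5-t3-t7.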
def strassen(matrix1, matrix2):
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
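# Illustrative usage: `strassen` pads rectangular inputs up to the next power
# of two, multiplies recursively, then trims the padding back off, e.g.
#
#     strassen([[1, 2]], [[3], [4]])  # -> [[11]]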
if __name__ == "__main__":
A_ = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
A_ = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
| 29 |
"""simple docstring"""
def print_pascal_triangle(num_rows):
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows):
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")

    triangle = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
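# Example (illustrative):
#     generate_pascal_triangle(4)  # -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]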
def populate_current_row(triangle, current_row_idx):
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(triangle, current_row, current_row_idx, current_col_idx):
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows):
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")

    result = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
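# The optimized variant above only computes the first half of each row and
# mirrors it, exploiting the symmetry of the triangle; both generators agree:
#     generate_pascal_triangle_optimized(4)  # -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]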
def benchmark():
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 29 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 29 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
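    # np.moveaxis above converts the random (channels, height, width) arrays
    # into the (height, width, channels) layout that PIL's `Image.fromarray` expects.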
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 29 | 1 |
"""simple docstring"""
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
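# Example (illustrative):
#     text_justification("This is an example of text justification.", 16)
#     # -> ['This    is    an', 'example  of text', 'justification.  ']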
if __name__ == "__main__":
from doctest import testmod
testmod()
| 29 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
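# Example (illustrative) of the renaming performed above:
#     rename_key("layers.0.blocks.1.modulation.f.weight")
#     # -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"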
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
A_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
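# Illustrative invocation (the script file name here is assumed):
#     python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny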
| 29 | 1 |
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence, start=None, end=None):
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
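# Example (illustrative): slowsort sorts in place.
#     seq = [6, 2, 4, 1]
#     slowsort(seq)
#     assert seq == [1, 2, 4, 6]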
if __name__ == "__main__":
from doctest import testmod
testmod()
| 29 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_a = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
| 29 | 1 |
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped
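# Example of the contiguous split computed above: 10 items over num_proc=3
# gives div=3, mod=1, hence slice boundaries 0-4, 4-7, 7-10 and lengths [4, 3, 3].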
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
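# Minimal usage sketch (assumes a Spark cluster, the `joblibspark` package, and
# that this module is importable as `datasets.parallel`):
#
#     from datasets.parallel import parallel_backend
#
#     with parallel_backend("spark"):
#         ...  # dataset preparation steps are dispatched through joblib-on-Spark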
| 29 |
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD], which gets replaced by the actual token from the Treebank, which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities This column identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing of CoNLL files was developed by Leo Born.
"""
_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one are
considered singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 29 | 1 |
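For reference, the conll_score that evaluate() reports is simply the mean of the MUC, B-cubed and CEAFe F1 values scaled to 0-100. A toy sketch of that aggregation step with invented F1 values (real values come from evaluator.evaluate_documents):

# Toy illustration of the CoNLL-score aggregation in `evaluate` above.
component_f1 = {"muc": 0.70, "bcub": 0.65, "ceafe": 0.60}  # invented scores

conll = 0.0
conll_subparts_num = 0
for name in ["muc", "bcub", "ceafe"]:
    conll += component_f1[name]
    conll_subparts_num += 1

if conll_subparts_num == 3:
    conll_score = (conll / 3) * 100
    print(f"CoNLL score: {conll_score:.2f}")  # CoNLL score: 65.00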
"""simple docstring"""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df), or the smoothed 1 + log10(n / (1 + df))."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: float, idf: float) -> float:
    return round(tf * idf, 3)
| 29 |
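A quick usage sketch of the TF-IDF helpers on a toy corpus, assuming the fixed names from the snippet above (term_frequency, document_frequency, inverse_document_frequency, tf_idf); the corpus is invented:

# One document per line; "cat" appears in two of the three documents.
corpus = "the cat sat on the mat\nthe dog barked\na cat chased the dog"

tf = term_frequency("cat", "the cat sat on the mat")  # 1 occurrence
df, n = document_frequency("cat", corpus)             # (2, 3)
idf = inverse_document_frequency(df, n)               # round(log10(3 / 2), 3) = 0.176
print(tf_idf(tf, idf))                                # 0.176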
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
            # update_kwargs was used to update the config on valid attributes
            self.assertEqual(new_config.foo, "bar")

            generation_config = GenerationConfig.from_model_config(new_config)
            assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 29 | 1 |
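Outside the test harness, the round trip exercised above looks like this. A minimal sketch using the same public GenerationConfig API; the parameter values mirror the tests and require transformers to be installed:

import tempfile

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir)

assert loaded.temperature == 0.7  # explicitly set value survives the round trip
assert loaded.top_k == 50         # unset values fall back to defaults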
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Compute u * (u - 1) * ... * (u - p + 1), the falling-factorial term."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
| 29 |
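The same Newton forward interpolation can be run without the input() prompts. A self-contained sketch on a fixed table, reusing ucal from the snippet above; the sample data is invented, y's column 0 holds the known values and later columns the forward differences:

import math

# Interpolate f(1925) from values sampled every 10 years.
x = [1891, 1901, 1911, 1921, 1931]
values = [46.0, 66.0, 81.0, 93.0, 101.0]
n = len(x)

y = [[0.0] * n for _ in range(n)]
for i in range(n):
    y[i][0] = values[i]

# Build the forward-difference table column by column.
for i in range(1, n):
    for j in range(n - i):
        y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

value = 1925
u = (value - x[0]) / (x[1] - x[0])  # 3.4

summ = y[0][0]
for i in range(1, n):
    summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

print(f"the value at {value} is {summ}")  # ~96.84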
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self):
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self):
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 29 | 1 |
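A usage sketch of TranslationVariableLanguages as fixed above; the example sentences are invented, and note that encode_example sorts the output pairs by language code, then by text:

feature = TranslationVariableLanguages(languages=["en", "fr", "de"])

encoded = feature.encode_example(
    {"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}
)
print(encoded)
# {'language': ('de', 'en', 'fr', 'fr'),
#  'translation': ('die katze', 'the cat', 'la chatte', 'le chat')}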
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29 |
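This import layout (repeated in the next two snippets) defers heavy submodule imports until an attribute is first accessed. A generic miniature of the same idea via module-level __getattr__ (PEP 562), independent of the transformers _LazyModule implementation and using hypothetical module names:

# mypkg/__init__.py -- hypothetical package, for illustration only
import importlib

_import_structure = {
    "heavy_module": ["HeavyClass"],  # submodule -> names it exports
}


def __getattr__(name):
    # Called only when `name` is not found normally, so the heavy submodule
    # is imported on first access rather than at package import time.
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")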
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 29 |
"""simple docstring"""
import math
def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes: return all primes strictly below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """
    Sum all semidivisible numbers not exceeding `limit` (Project Euler 234):
    numbers divisible by exactly one of lps(n) and ups(n), the largest prime
    <= sqrt(n) and the smallest prime >= sqrt(n).
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
| 29 | 1 |
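Two quick sanity checks of prime_sieve as fixed above; note that it returns the primes strictly below n:

assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert len(prime_sieve(100)) == 25  # there are 25 primes below 100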
"""simple docstring"""
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")

    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")

    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65_536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65_536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 29 |
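The core move in both converters is renaming checkpoint keys by zipping the old and new state dicts in order; this relies on dicts preserving insertion order and on both models listing matching tensors at matching positions. A toy illustration of the remapping step, with plain ints standing in for tensors:

from collections import OrderedDict

old_state_dict = OrderedDict([("blocks.0.weight", 1), ("blocks.0.bias", 2)])
new_key_order = ["down_blocks.0.weight", "down_blocks.0.bias"]

# Snapshot the mapping first, then mutate the dict.
mapping = dict(zip(old_state_dict.keys(), new_key_order))
for old_key, new_key in mapping.items():
    old_state_dict[new_key] = old_state_dict.pop(old_key)

print(old_state_dict)
# OrderedDict([('down_blocks.0.weight', 1), ('down_blocks.0.bias', 2)])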
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2_351_563,
                        "num_examples": 10_000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238_418,
                        "num_examples": 1_000,
                    },
                ],
                download_size=3_940_680,
                dataset_size=2_589_981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected  # exact match for the remaining YAML fields
| 29 | 1 |
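The is_1percent_close helper tolerates small drift between recomputed and recorded byte counts. Two quick checks of its behavior, with invented values:

assert is_1percent_close(2_351_563, 2_355_000)      # ~0.15% off -> close enough
assert not is_1percent_close(2_351_563, 2_500_000)  # ~5.9% off  -> fails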