| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 87-55.2k | int64 0-349 | stringlengths 135-49.1k | int64 0-349 | int64 0-1 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def _lowerCAmelCase ( lowercase_ ):
def decorator(lowercase_ ):
UpperCAmelCase = getattr(snake_case_ , 'handle_key' , [] )
handle += [key]
setattr(snake_case_ , 'handle_key' , snake_case_ )
return func
return decorator
def _lowerCAmelCase ( *lowercase_ ):
def decorator(lowercase_ ):
UpperCAmelCase = getattr(snake_case_ , 'handle_key' , [] )
handle += keys
setattr(snake_case_ , 'handle_key' , snake_case_ )
return func
return decorator
class A_ ( _UpperCAmelCase ):
"""simple docstring"""
def __new__( cls :Union[str, Any] , lowercase_ :Union[str, Any] , lowercase_ :Union[str, Any] , lowercase_ :str ) -> Optional[int]:
UpperCAmelCase = super().__new__(cls , a__ , a__ , a__ )
if not hasattr(a__ , 'key_handler' ):
setattr(a__ , 'key_handler' , {} )
setattr(a__ , 'handle_input' , KeyHandler.handle_input )
for value in attrs.values():
UpperCAmelCase = getattr(a__ , 'handle_key' , [] )
for key in handled_keys:
UpperCAmelCase = value
return new_cls
@staticmethod
def UpperCAmelCase__ ( cls :Optional[int] ) -> Dict:
UpperCAmelCase = get_character()
if char != KEYMAP["undefined"]:
UpperCAmelCase = ord(a__ )
UpperCAmelCase = cls.key_handler.get(a__ )
if handler:
UpperCAmelCase = char
return handler(cls )
else:
return None
def _lowerCAmelCase ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
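
# A hedged usage sketch (editor's addition, not part of the original file):
# tag methods with the key they should handle, then rebuild the class through
# `register` so KeyHandler wires up the dispatch table.
#
#   @register
#   class Menu:
#       @mark(KEYMAP["up"])
#       def move_up(self):
#           ...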
from math import pi


def arc_length(angle: float, radius: float) -> float:
    """Return the length of a circular arc subtending `angle` degrees."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
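
# Sanity check (editor's addition): a 90-degree arc of a radius-10 circle is a
# quarter circumference, 2 * pi * 10 / 4 = 5 * pi, roughly 15.71.
assert abs(arc_length(90, 10) - 5 * pi) < 1e-9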
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    """Simplest image resizing: each destination pixel copies the nearest source pixel."""

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        # Copy, for every destination pixel, the nearest source pixel.
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination x coordinate back to the source image."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination y coordinate back to the source image."""
        return int(self.ratio_y * y)
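
# A minimal self-check (editor's sketch, not part of the original script):
# upscaling a 2x2 image to 4x4 repeats each source pixel in a 2x2 block,
# since get_x / get_y floor the scaled coordinates.
def _nearest_neighbour_smoke_test():
    src = np.zeros((2, 2, 3), np.uint8)
    src[1][1] = 255
    scaled = NearestNeighbour(src, 4, 4)
    scaled.process()
    assert (scaled.output[0][0] == src[0][0]).all()
    assert (scaled.output[3][3] == src[1][1]).all()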
if __name__ == "__main__":
UpperCamelCase_ ,UpperCamelCase_ = 800, 600
UpperCamelCase_ = imread('''image_data/lena.jpg''', 1)
UpperCamelCase_ = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
)
waitKey(0)
destroyAllWindows()
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
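
# A hedged usage sketch (commented out because the relative imports above only
# resolve inside the transformers package): PretrainedConfig subclasses are
# typically instantiated with keyword overrides.
#
#   config = ViTMSNConfig(image_size=384)
#   assert config.model_type == "vit_msn"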
"""simple docstring"""
from __future__ import annotations
A : Dict = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
'''simple docstring'''
__lowerCAmelCase = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case_ ) )
] # the reference grid
__lowerCAmelCase = 1
__lowerCAmelCase = [
[0 for col in range(len(grid[0] ) )] for row in range(len(snake_case_ ) )
] # the action grid
__lowerCAmelCase = init[0]
__lowerCAmelCase = init[1]
__lowerCAmelCase = 0
__lowerCAmelCase = g + heuristic[x][y] # cost from starting cell to destination cell
__lowerCAmelCase = [[f, g, x, y]]
__lowerCAmelCase = False # flag that is set when search is complete
__lowerCAmelCase = False # flag set if we can't find expand
while not found and not resign:
if len(snake_case_ ) == 0:
raise ValueError("Algorithm is unable to find solution" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__lowerCAmelCase = cell.pop()
__lowerCAmelCase = next_cell[2]
__lowerCAmelCase = next_cell[3]
__lowerCAmelCase = next_cell[1]
if x == goal[0] and y == goal[1]:
__lowerCAmelCase = True
else:
for i in range(len(snake_case_ ) ): # to try out different valid actions
__lowerCAmelCase = x + DIRECTIONS[i][0]
__lowerCAmelCase = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(snake_case_ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__lowerCAmelCase = g + cost
__lowerCAmelCase = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__lowerCAmelCase = 1
__lowerCAmelCase = i
__lowerCAmelCase = []
__lowerCAmelCase = goal[0]
__lowerCAmelCase = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__lowerCAmelCase = x - DIRECTIONS[action[x][y]][0]
__lowerCAmelCase = y - DIRECTIONS[action[x][y]][1]
__lowerCAmelCase = xa
__lowerCAmelCase = ya
invpath.append([x, y] )
__lowerCAmelCase = []
for i in range(len(snake_case_ ) ):
path.append(invpath[len(snake_case_ ) - 1 - i] )
return path, action
if __name__ == "__main__":
A : Union[str, Any] = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
A : Optional[Any] = [0, 0]
# all coordinates are given in format [y,x]
A : Tuple = [len(grid) - 1, len(grid[0]) - 1]
A : str = 1
# the cost map which pushes the path closer to the goal
A : Union[str, Any] = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
A : Optional[Any] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
A : str = 9_9
A , A : Tuple = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
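
# Editor's note (a hedged sketch of the expected behaviour, not verified
# output): with this grid the walker must descend column 0, cross the bottom
# row to column 3, climb back to row 2 to skirt the obstacle in rows 3-4 of
# column 4, and then descend column 5 to the goal; that detour adds 4 moves to
# the Manhattan distance of 9, giving a 13-move shortest path.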
import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
import random
import unittest

import numpy as np

import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax


if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit
    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8

if is_torch_available():
    import torch


def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape, bounded by vocab_size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(input_shape, rng=None):
    attn_mask = ids_tensor(input_shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask


@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union

import tqdm
from filelock import FileLock

from transformers import (
    BartTokenizer,
    BartTokenizerFast,
    DataProcessor,
    PreTrainedTokenizer,
    RobertaTokenizer,
    RobertaTokenizerFast,
    XLMRobertaTokenizer,
    is_tf_available,
    is_torch_available,
)


logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None


if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
) -> List[InputFeatures]:
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
from collections import deque


class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    """Multi-level feedback queue scheduler."""

    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of the MLFQ's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue

        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
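
# Editor's note (a hedged sketch of the expected flow): with time_slices
# [17, 25] the first two queues are serviced round-robin; any process that
# exhausts its slice is demoted to the next queue, and whatever survives to
# the third queue runs to completion first-come-first-served.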
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
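
# A hedged usage sketch (editor's addition; the instance names are assumed,
# not from the original file): processors built on ProcessorMixin are usually
# driven like
#
#   processor = AltCLIPProcessor(image_processor=image_processor, tokenizer=tokenizer)
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#
# after which `batch` carries both `input_ids` and `pixel_values`.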
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from datasets import Dataset, load_dataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_MASKED_LM_MAPPING,
    AutoConfig,
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )


@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."


def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
import os
import unittest

from huggingface_hub.utils import are_progress_bars_disabled

import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar


class TransformersLoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 24 | 0 |
import unittest

from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectron2,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None


# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好！"
        jieba_tokens = ["今天", "天气", "真", "好", "！"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_bpe_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

        reconstructed_text = tokenizer.decode(input_bpe_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 24 | 0 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
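# A minimal usage sketch (an addition, not part of the original module). The
# defaults above match ALBERT xxlarge-sized models; the smaller values below are
# purely illustrative.
#
#     config = AlbertConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)
#     print(config.model_type, config.vocab_size)  # albert 30000
#     onnx_config = AlbertOnnxConfig(config, task="default")
#     print(dict(onnx_config.inputs))  # dynamic axes for input_ids / attention_mask / token_type_ids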
| 36 |
import copy
import inspect
import unittest

from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor


if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

from ...test_pipeline_mixin import PipelineTesterMixin


class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_save_tied(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 24 | 0 |
"""
Project Euler Problem 3: https://projecteuler.net/problem=3

Find the largest prime factor of a given number (600851475143 by default).
"""


def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n by trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    prime = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            prime = i
            n //= i
        i += 1
    if n > 1:
        prime = n
    return int(prime)


if __name__ == "__main__":
    print(f"{solution() = }")
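# Quick sanity checks (an illustrative addition; both values are verifiable by hand):
assert solution(13195) == 29  # 13195 = 5 * 7 * 13 * 29
assert solution(17) == 17     # a prime is its own largest prime factor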
| 75 |
import os

import pytest

from transformers.dynamic_module_utils import get_imports

TOP_LEVEL_IMPORT = "\nimport os\n"
IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"
DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"
TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"
TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"
MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"
EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"
GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"
MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"
MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
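# Standalone illustration (an addition, not part of the original test file):
# imports guarded by try/except are treated as optional, so `get_imports` reports
# only the unconditional top-level ones.
def test_optional_imports_are_skipped(tmp_path):
    file_path = os.path.join(tmp_path, "demo_module.py")
    with open(file_path, "w") as f:
        f.write(GENERIC_EXCEPT_IMPORT)
    assert get_imports(file_path) == ["os"]  # 'bar' is optional and therefore skipped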
| 24 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
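
def query_index(passages_path: str, index_path: str, question: str, k: int = 5):
    """Hypothetical follow-up helper (an addition, not part of the original script):
    reload the saved passages and Faiss index and retrieve the ``k`` nearest passages
    for ``question`` with a DPR question encoder. The encoder checkpoint below is an
    assumption; any DPR question encoder matching the context encoder should work."""
    from datasets import load_from_disk
    from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

    dataset = load_from_disk(passages_path)
    dataset.load_faiss_index("embeddings", index_path)

    q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-multiset-base").to(device=device)
    q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-multiset-base")

    input_ids = q_tokenizer(question, return_tensors="pt")["input_ids"].to(device=device)
    question_embedding = q_encoder(input_ids).pooler_output[0].detach().cpu().numpy()
    scores, retrieved_examples = dataset.get_nearest_examples("embeddings", question_embedding, k=k)
    return scores, retrieved_examples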
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 120 |
import socket


def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
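
# For completeness, a minimal matching server sketch (an assumption — the original
# server script is not part of this snippet). It listens on the same port and
# streams a file to the first client that connects.
def serve_file(filename: str = "sample_file.txt", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.sendall(chunk)
    conn.close()
    server.close()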
| 24 | 0 |
from math import log2


def get_index_of_rightmost_set_bit(number: int) -> int:
    # e.g. 36 == 0b100100 -> the rightmost set bit is at index 2
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    # `number & -number` isolates the lowest set bit; log2 gives its index
    return 0 if (number == 0) else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 147 |
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
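
# Worked example (an illustrative addition): for [3, 1, 2] the pivot 3 is skipped
# in favour of the increasing run starting at 1; which of several equally long
# answers is returned depends on the recursion order.
if __name__ == "__main__":
    print(longest_subsequence([3, 1, 2]))  # [1, 2]
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))  # a length-6 increasing subsequence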
| 24 | 0 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
snake_case_ = logging.get_logger(__name__)
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ = None ):
UpperCAmelCase = tesseract_config if tesseract_config is not None else ''
# apply OCR
UpperCAmelCase = to_pil_image(snake_case_ )
UpperCAmelCase , UpperCAmelCase = pil_image.size
UpperCAmelCase = pytesseract.image_to_data(snake_case_ , lang=snake_case_ , output_type='dict' , config=snake_case_ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
UpperCAmelCase = [idx for idx, word in enumerate(snake_case_ ) if not word.strip()]
UpperCAmelCase = [word for idx, word in enumerate(snake_case_ ) if idx not in irrelevant_indices]
UpperCAmelCase = [coord for idx, coord in enumerate(snake_case_ ) if idx not in irrelevant_indices]
UpperCAmelCase = [coord for idx, coord in enumerate(snake_case_ ) if idx not in irrelevant_indices]
UpperCAmelCase = [coord for idx, coord in enumerate(snake_case_ ) if idx not in irrelevant_indices]
UpperCAmelCase = [coord for idx, coord in enumerate(snake_case_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
UpperCAmelCase = []
for x, y, w, h in zip(snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
UpperCAmelCase = [x, y, x + w, y + h]
actual_boxes.append(snake_case_ )
# finally, normalize the bounding boxes
UpperCAmelCase = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(snake_case_ , snake_case_ , snake_case_ ) )
assert len(snake_case_ ) == len(snake_case_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class A_ ( _UpperCAmelCase ):
"""simple docstring"""
__UpperCamelCase = ['pixel_values']
def __init__( self :str , lowercase_ :bool = True , lowercase_ :Dict[str, int] = None , lowercase_ :PILImageResampling = PILImageResampling.BILINEAR , lowercase_ :bool = True , lowercase_ :Optional[str] = None , lowercase_ :Optional[str] = "" , **lowercase_ :int , ) -> List[Any]:
super().__init__(**a__ )
UpperCAmelCase = size if size is not None else {'height': 2_24, 'width': 2_24}
UpperCAmelCase = get_size_dict(a__ )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = resample
UpperCAmelCase = apply_ocr
UpperCAmelCase = ocr_lang
UpperCAmelCase = tesseract_config
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :np.ndarray , lowercase_ :Dict[str, int] , lowercase_ :PILImageResampling = PILImageResampling.BILINEAR , lowercase_ :Optional[Union[str, ChannelDimension]] = None , **lowercase_ :List[str] , ) -> Any:
UpperCAmelCase = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
UpperCAmelCase = (size['height'], size['width'])
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def UpperCAmelCase__ ( self :Dict , lowercase_ :ImageInput , lowercase_ :bool = None , lowercase_ :Dict[str, int] = None , lowercase_ :PILImageResampling = None , lowercase_ :bool = None , lowercase_ :Optional[str] = None , lowercase_ :Optional[str] = None , lowercase_ :Optional[Union[str, TensorType]] = None , lowercase_ :ChannelDimension = ChannelDimension.FIRST , **lowercase_ :List[str] , ) -> Union[str, Any]:
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(a__ )
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = apply_ocr if apply_ocr is not None else self.apply_ocr
UpperCAmelCase = ocr_lang if ocr_lang is not None else self.ocr_lang
UpperCAmelCase = tesseract_config if tesseract_config is not None else self.tesseract_config
UpperCAmelCase = make_list_of_images(a__ )
if not valid_images(a__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
# All transformations expect numpy arrays.
UpperCAmelCase = [to_numpy_array(a__ ) for image in images]
if apply_ocr:
requires_backends(self , 'pytesseract' )
UpperCAmelCase = []
UpperCAmelCase = []
for image in images:
UpperCAmelCase , UpperCAmelCase = apply_tesseract(a__ , a__ , a__ )
words_batch.append(a__ )
boxes_batch.append(a__ )
if do_resize:
UpperCAmelCase = [self.resize(image=a__ , size=a__ , resample=a__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
UpperCAmelCase = [flip_channel_order(a__ ) for image in images]
UpperCAmelCase = [to_channel_dimension_format(a__ , a__ ) for image in images]
UpperCAmelCase = BatchFeature(data={'pixel_values': images} , tensor_type=a__ )
if apply_ocr:
UpperCAmelCase = words_batch
UpperCAmelCase = boxes_batch
return data
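
# A hypothetical usage sketch (an addition — not part of the original module):
# running the processor on a PIL document image with OCR enabled. Requires the
# `pytesseract` soft dependency; the image path is a placeholder.
#
#     from PIL import Image
#
#     image_processor = LayoutLMv2ImageProcessor()  # apply_ocr=True by default
#     encoding = image_processor(Image.open("invoice.png").convert("RGB"), return_tensors="pt")
#     print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
#     print(encoding.words, encoding.boxes)  # OCR'd words and normalized boxes per image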
| 78 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 24 | 0 |
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
| 345 |
import unittest

from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectron2,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None


# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Tuple ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Dict ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def a (self : Tuple ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def a (self : List[str] ):
"""simple docstring"""
pass
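def _example_dqa_usage():
    # Illustrative sketch only (not part of the test suite): the checkpoint is
    # the LayoutLM model the slow tests above load, and INVOICE_URL is the
    # module-level fixture used throughout this file.
    from transformers import pipeline

    dqa_pipeline = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    # Each prediction is a dict with "score", "answer", "start" and "end" keys.
    return dqa_pipeline(image=INVOICE_URL, question="What is the invoice number?", top_k=2)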
| 24 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case ( self ):
__lowerCAmelCase = 1
__lowerCAmelCase = 3
__lowerCAmelCase = (32, 32)
__lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a__ )
return image
@property
def snake_case ( self ):
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=a__ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def snake_case ( self ):
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def snake_case ( self ):
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
return CLIPTextModel(a__ )
def snake_case ( self ):
__lowerCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.dummy_cond_unet_upscale
__lowerCAmelCase = DDPMScheduler()
__lowerCAmelCase = DDIMScheduler(prediction_type="v_prediction" )
__lowerCAmelCase = self.dummy_vae
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__lowerCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__lowerCAmelCase = StableDiffusionUpscalePipeline(
unet=a__ , low_res_scheduler=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , max_noise_level=3_50 , )
__lowerCAmelCase = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
__lowerCAmelCase = "A painting of a squirrel eating a burger"
__lowerCAmelCase = torch.Generator(device=a__ ).manual_seed(0 )
__lowerCAmelCase = sd_pipe(
[prompt] , image=a__ , generator=a__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase = output.images
__lowerCAmelCase = torch.Generator(device=a__ ).manual_seed(0 )
__lowerCAmelCase = sd_pipe(
[prompt] , image=a__ , generator=a__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=a__ , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
__lowerCAmelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__lowerCAmelCase = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self ):
__lowerCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.dummy_cond_unet_upscale
__lowerCAmelCase = DDPMScheduler()
__lowerCAmelCase = DDIMScheduler(prediction_type="v_prediction" )
__lowerCAmelCase = self.dummy_vae
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__lowerCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__lowerCAmelCase = StableDiffusionUpscalePipeline(
unet=a__ , low_res_scheduler=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , max_noise_level=3_50 , )
__lowerCAmelCase = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
__lowerCAmelCase = "A painting of a squirrel eating a burger"
__lowerCAmelCase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase = output.images
assert image.shape[0] == 2
__lowerCAmelCase = torch.Generator(device=a__ ).manual_seed(0 )
__lowerCAmelCase = sd_pipe(
[prompt] , image=a__ , generator=a__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def snake_case ( self ):
__lowerCAmelCase = self.dummy_cond_unet_upscale
__lowerCAmelCase = DDPMScheduler()
__lowerCAmelCase = DDIMScheduler(prediction_type="v_prediction" )
__lowerCAmelCase = self.dummy_vae
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__lowerCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
__lowerCAmelCase = unet.half()
__lowerCAmelCase = text_encoder.half()
# make sure here that pndm scheduler skips prk
__lowerCAmelCase = StableDiffusionUpscalePipeline(
unet=a__ , low_res_scheduler=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , max_noise_level=3_50 , )
__lowerCAmelCase = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
__lowerCAmelCase = "A painting of a squirrel eating a burger"
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = sd_pipe(
[prompt] , image=a__ , generator=a__ , num_inference_steps=2 , output_type="np" , ).images
__lowerCAmelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self ):
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
__lowerCAmelCase = "stabilityai/stable-diffusion-x4-upscaler"
__lowerCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
__lowerCAmelCase = "a cat sitting on a park bench"
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(
prompt=a__ , image=a__ , generator=a__ , output_type="np" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def snake_case ( self ):
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
__lowerCAmelCase = "stabilityai/stable-diffusion-x4-upscaler"
__lowerCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
__lowerCAmelCase = "a cat sitting on a park bench"
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(
prompt=a__ , image=a__ , generator=a__ , output_type="np" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
__lowerCAmelCase = "stabilityai/stable-diffusion-x4-upscaler"
__lowerCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(
a__ , torch_dtype=torch.floataa , )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = "a cat sitting on a park bench"
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(
prompt=a__ , image=a__ , generator=a__ , num_inference_steps=5 , output_type="np" , )
__lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
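def _example_upscaler_usage():
    # Illustrative sketch only: mirrors the slow tests above. The checkpoint is
    # the one those tests load; running this downloads several GB of weights.
    import torch
    from diffusers import StableDiffusionUpscalePipeline
    from diffusers.utils import load_image

    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    ).to("cuda")
    low_res = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images"
        "/resolve/main/sd2-upscale/low_res_cat.png"
    )
    # The x4 upscaler turns the 128px input into a 512px image.
    return pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]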
| 57 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result
def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune branches that already overshoot max_sum or can no longer reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index], )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
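# Worked example for the pruning above: with nums = [3, 34, 4, 12, 5, 2] and
# max_sum = 9 a branch survives only while
#     sum(path) <= max_sum  and  sum(path) + remaining_nums_sum >= max_sum,
# so e.g. every branch that picks 34 is cut on entry. The two surviving
# subsets, printed above, are [3, 4, 2] and [4, 5].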
| 24 | 0 |
def solution(pence: int = 200) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_0_0) == 7_3_6_8_2
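# Worked example of the table update: for pence = 5 only the coins [1, 2, 5]
# contribute, and number_of_ways ends up as [1, 1, 2, 2, 3, 4] - the 4 ways to
# make 5p being {5}, {2, 2, 1}, {2, 1, 1, 1} and {1, 1, 1, 1, 1}.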
| 30 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Any , a__ : List[Any] , a__ : Dict=13 , a__ : str=32 , a__ : Tuple=3 , a__ : Optional[Any]=4 , a__ : Optional[int]=[10, 20, 30, 40] , a__ : List[Any]=[2, 2, 3, 2] , a__ : List[Any]=True , a__ : int=True , a__ : List[Any]=37 , a__ : Any="gelu" , a__ : int=10 , a__ : Dict=0.0_2 , a__ : Dict=["stage2", "stage3", "stage4"] , a__ : Tuple=[2, 3, 4] , a__ : List[str]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = num_stages
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = is_training
__snake_case = use_labels
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_labels
__snake_case = initializer_range
__snake_case = out_features
__snake_case = out_indices
__snake_case = scope
def a (self : Dict ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def a (self : List[str] ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a (self : str , a__ : Union[str, Any] , a__ : List[str] , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a (self : Optional[Any] , a__ : List[Any] , a__ : str , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification(a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a (self : Tuple , a__ : List[Any] , a__ : List[str] , a__ : List[str] ):
"""simple docstring"""
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
        # verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__snake_case = None
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Dict = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
A_ : Optional[Any] = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
A_ : Dict = True
A_ : Optional[Any] = False
A_ : int = False
A_ : int = False
A_ : List[str] = False
def a (self : List[str] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def a (self : Tuple ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a (self : str ):
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def a (self : Dict ):
"""simple docstring"""
def check_hidden_states_output(a__ : List[str] , a__ : str , a__ : Tuple ):
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(a__ , a__ ) )
__snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def a (self : Any ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = ConvNextModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def lowerCamelCase__ ( ) -> List[str]:
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def a (self : Tuple ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
__snake_case = model(**a__ )
# verify the logits
__snake_case = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
__snake_case = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , _UpperCAmelCase ):
A_ : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else ()
A_ : List[Any] = ConvNextConfig
A_ : Optional[Any] = False
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
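def _example_convnext_inference():
    # Illustrative sketch only: the checkpoint and fixture image are the ones
    # the integration test above uses.
    import torch
    from PIL import Image
    from transformers import AutoImageProcessor, ConvNextForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
    model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    with torch.no_grad():
        logits = model(**processor(images=image, return_tensors="pt")).logits
    # Map the top logit back to a human-readable ImageNet label.
    return model.config.id2label[logits.argmax(-1).item()]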
| 24 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : List[str] = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Union[str, Any] = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[Any] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__UpperCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
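# The try/except blocks above register symbols in _import_structure only when
# their optional dependency (sentencepiece, tokenizers, torch) is installed, so
# the package stays importable and cheap to load without any extras. A minimal
# sketch of the same gating for a hypothetical "foo" module:
#
#     _import_structure = {"configuration_foo": ["FooConfig"]}
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         pass
#     else:
#         _import_structure["modeling_foo"] = ["FooModel"]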
| 307 |
def solution() -> int:
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
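# Sanity check for the list comprehension above: the only Pythagorean triplet
# with a + b + c = 1000 is (200, 375, 425), since
# 200**2 + 375**2 = 40_000 + 140_625 = 180_625 = 425**2, so the printed
# product is 200 * 375 * 425 = 31_875_000.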
| 24 | 0 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def snake_case( ) -> Dict:
'''simple docstring'''
raise RuntimeError('''CUDA out of memory.''' )
class _A ( nn.Module ):
def __init__( self : List[Any] ) -> str:
"""simple docstring"""
super().__init__()
lowercase : Union[str, Any] = nn.Linear(3 , 4 )
lowercase : str = nn.BatchNormad(4 )
lowercase : Optional[int] = nn.Linear(4 , 5 )
def __a ( self : Optional[Any] , _A : List[str] ) -> int:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(a__ ) ) )
class _A ( unittest.TestCase ):
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(_A : Any ):
nonlocal batch_sizes
batch_sizes.append(a__ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(a__ , [128, 64, 32, 16, 8] )
def __a ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[int] = []
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(_A : str , _A : Tuple ):
nonlocal batch_sizes
batch_sizes.append(a__ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
lowercase , lowercase : Tuple = mock_training_loop_function('''hello''' )
self.assertListEqual(a__ , [128, 64, 32, 16, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def __a ( self : List[Any] ) -> Any:
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(_A : Optional[int] ):
pass
with self.assertRaises(a__ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def __a ( self : Tuple ) -> Dict:
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(_A : str ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(a__ ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def __a ( self : int ) -> Tuple:
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=128 )
def mock_training_loop_function(_A : int , _A : Tuple , _A : int ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(a__ ) as cm:
mock_training_loop_function(128 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def __a ( self : Optional[Any] ) -> int:
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=16 )
def mock_training_loop_function(_A : Optional[Any] ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(a__ ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def __a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
lowercase : Union[str, Any] = torch.cuda.memory_allocated()
lowercase : Union[str, Any] = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , a__ )
lowercase : Any = release_memory(a__ )
        self.assertEqual(torch.cuda.memory_allocated() , a__ )
| 308 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : List[Any] = BartphoTokenizer
A_ : List[str] = False
A_ : Optional[Any] = True
def a (self : Tuple ):
"""simple docstring"""
super().setUp()
__snake_case = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
__snake_case = dict(zip(a__ , range(len(a__ ) ) ) )
__snake_case = {'''unk_token''': '''<unk>'''}
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
__snake_case = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def a (self : str , **a__ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **a__ )
def a (self : str , a__ : Any ):
"""simple docstring"""
__snake_case = '''This is a là test'''
__snake_case = '''This is a<unk><unk> test'''
return input_text, output_text
def a (self : Dict ):
"""simple docstring"""
__snake_case = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map )
__snake_case = '''This is a là test'''
__snake_case = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
__snake_case = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
__snake_case = tokens + [tokenizer.unk_token]
__snake_case = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
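def _example_bartpho_usage():
    # Illustrative sketch only: "vinai/bartpho-syllable" is the public BARTpho
    # checkpoint this tokenizer class was written for; the sample sentence is
    # the one from the model card.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
    return tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]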
| 24 | 0 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict):
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
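def tuned_regressor() -> XGBRegressor:
    # Illustrative sketch only: these hyperparameter values are assumptions,
    # not tuned results; they show how the default regressor above could be
    # configured instead of using all defaults.
    return XGBRegressor(
        n_estimators=200,  # more boosting rounds than the default 100
        learning_rate=0.1,
        max_depth=6,
        verbosity=0,
        random_state=42,
    )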
def main() -> None:
    # Load the California house price dataset
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1 )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"""Mean Absolute Error : {mean_absolute_error(y_test, predictions)}""" )
    print(f"""Mean Square Error : {mean_squared_error(y_test, predictions)}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
    main()
| 145 |
def lowerCamelCase__ ( number : int ) -> int:
    if not isinstance(number , int ) or number < 0:
        raise ValueError('''Input must be a non-negative integer''' )
    count = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
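# Worked example of the number &= number - 1 trick: for number = 13 (0b1101)
# each pass clears the lowest set bit - 0b1101 -> 0b1100 -> 0b1000 -> 0b0000 -
# so the loop runs exactly 3 times, once per set bit, instead of once per bit
# of the word width.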
| 24 | 0 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3):
    '''simple docstring'''
    if not isinstance(number_of_qubits, int):
        raise TypeError("number of qubits must be an integer." )
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0." )
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be an exact integer." )
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10)." )
    qr = QuantumRegister(number_of_qubits, "qr" )
    cr = ClassicalRegister(number_of_qubits, "cr" )
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k , number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr , cr )
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator" )
    job = execute(quantum_circuit , backend , shots=10_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
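# For the default |000> input the transform produces the uniform superposition
# (QFT|0> puts equal amplitude on every basis state), so over 10_000 shots the
# printed counts should be spread roughly evenly across all 2**3 = 8 outcomes,
# about 1_250 shots each up to sampling noise.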
| 36 |
from math import log2
def lowerCamelCase__ ( a : int ) -> int:
    if a < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif not isinstance(a , int ):
        raise TypeError('''Input value must be an \'int\' type''' )
    return 0 if (a == 0) else int(log2(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
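# Worked example: for a = 12 (0b1100) the two's-complement trick a & -a
# isolates the lowest set bit (0b0100 == 4), and int(log2(4)) == 2 is its
# zero-based index.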
| 24 | 0 |
'''simple docstring'''
def gnome_sort( lst : list ) -> list:
    """simple docstring"""
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
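# Worked trace on [3, 1, 2]: the index walks forward while adjacent items are
# in order and steps back after every swap -
#   i=1: 3 > 1, swap -> [1, 3, 2], i -> 0 -> reset to 1
#   i=1: 1 <= 3, i -> 2
#   i=2: 3 > 2, swap -> [1, 2, 3], i -> 1
#   i=1 then i=2 both in order, so the loop ends with [1, 2, 3].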
| 75 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
snake_case_ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__(self : Optional[int] , *a__ : Any , **a__ : Dict ):
"""simple docstring"""
super().__init__(*a__ , **a__ )
requires_backends(self , '''vision''' )
self.check_model_type(a__ )
def __call__(self : Optional[int] , a__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **a__ : Tuple ):
"""simple docstring"""
return super().__call__(a__ , **a__ )
def a (self : Dict , **a__ : Any ):
"""simple docstring"""
return {}, {}, {}
def a (self : List[str] , a__ : Any ):
"""simple docstring"""
__snake_case = load_image(a__ )
__snake_case = image.size
__snake_case = self.image_processor(images=a__ , return_tensors=self.framework )
return model_inputs
def a (self : int , a__ : List[Any] ):
"""simple docstring"""
__snake_case = self.model(**a__ )
return model_outputs
def a (self : int , a__ : str ):
"""simple docstring"""
__snake_case = model_outputs.predicted_depth
__snake_case = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=a__ )
__snake_case = prediction.squeeze().cpu().numpy()
__snake_case = (output * 255 / np.max(a__ )).astype('''uint8''' )
__snake_case = Image.fromarray(a__ )
__snake_case = {}
__snake_case = predicted_depth
__snake_case = depth
return output_dict
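def _example_depth_estimation_usage():
    # Illustrative sketch only: "Intel/dpt-large" is one public checkpoint
    # that supports this task; any depth-estimation model works. The URL is a
    # standard COCO sample image.
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    # Per the postprocess step above, outputs["depth"] is a PIL image and
    # outputs["predicted_depth"] the raw tensor.
    return outputs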
| 24 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase):
"""simple docstring"""
lowercase = IFPipeline
lowercase = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {'latents'}
def __lowercase ( self : List[Any] ) -> Tuple:
return self._get_dummy_components()
def __lowercase ( self : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : str=0 ) -> Optional[int]:
if str(a__ ).startswith("""mps""" ):
lowerCAmelCase_ : Optional[int] = torch.manual_seed(a__ )
else:
lowerCAmelCase_ : Optional[int] = torch.Generator(device=a__ ).manual_seed(a__ )
lowerCAmelCase_ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __lowercase ( self : Tuple ) -> str:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __lowercase ( self : Union[str, Any] ) -> Tuple:
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowercase ( self : List[str] ) -> List[str]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
self._test_save_load_local()
def __lowercase ( self : Any ) -> List[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowercase ( self : Any ) -> str:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : int ) -> Optional[Any]:
lowerCAmelCase_ : List[str] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
lowerCAmelCase_ : int = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=a__ , tokenizer=a__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
lowerCAmelCase_, lowerCAmelCase_ : Optional[int] = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowerCAmelCase_ : str = None
lowerCAmelCase_ : str = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(a__ , a__ , a__ , a__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowerCAmelCase_ : int = IFImgaImgPipeline(**pipe_a.components )
lowerCAmelCase_ : int = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(a__ , a__ , a__ , a__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowerCAmelCase_ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components )
lowerCAmelCase_ : List[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(a__ , a__ , a__ , a__ )
def __lowercase ( self : str , lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] ) -> Optional[int]:
_start_torch_memory_measurement()
lowerCAmelCase_ : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ : str = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , num_inference_steps=2 , generator=a__ , output_type="""np""" , )
lowerCAmelCase_ : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
lowerCAmelCase_ : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowerCAmelCase_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(a__ , a__ )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase_ : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ )
lowerCAmelCase_ : List[Any] = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , generator=a__ , num_inference_steps=2 , output_type="""np""" , )
lowerCAmelCase_ : Any = output.images[0]
assert image.shape == (2_56, 2_56, 3)
lowerCAmelCase_ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCAmelCase_ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(a__ , a__ )
def __lowercase ( self : str , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : str ) -> Union[str, Any]:
_start_torch_memory_measurement()
lowerCAmelCase_ : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ )
lowerCAmelCase_ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ : List[str] = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , num_inference_steps=2 , generator=a__ , output_type="""np""" , )
lowerCAmelCase_ : str = output.images[0]
assert image.shape == (64, 64, 3)
lowerCAmelCase_ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowerCAmelCase_ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(a__ , a__ )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase_ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ : Tuple = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(a__ )
lowerCAmelCase_ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ )
lowerCAmelCase_ : int = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , original_image=a__ , generator=a__ , num_inference_steps=2 , output_type="""np""" , )
lowerCAmelCase_ : Tuple = output.images[0]
assert image.shape == (2_56, 2_56, 3)
lowerCAmelCase_ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCAmelCase_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(a__ , a__ )
def __lowercase ( self : List[str] , lowerCamelCase : List[str] , lowerCamelCase : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : int ) -> Optional[Any]:
_start_torch_memory_measurement()
lowerCAmelCase_ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ )
lowerCAmelCase_ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(a__ )
lowerCAmelCase_ : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ : Tuple = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , mask_image=a__ , num_inference_steps=2 , generator=a__ , output_type="""np""" , )
lowerCAmelCase_ : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
lowerCAmelCase_ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowerCAmelCase_ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(a__ , a__ )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase_ : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ )
lowerCAmelCase_ : str = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(a__ )
lowerCAmelCase_ : Tuple = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(a__ )
lowerCAmelCase_ : Union[str, Any] = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , mask_image=a__ , original_image=a__ , generator=a__ , num_inference_steps=2 , output_type="""np""" , )
lowerCAmelCase_ : Optional[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
lowerCAmelCase_ : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCAmelCase_ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(a__ , a__ )
def UpperCamelCase_ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
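def _example_if_usage():
    # Illustrative sketch only: stage I of the cascade the tests above cover.
    # Running it requires accepting the DeepFloyd license and downloading
    # several GB of weights.
    import torch
    from diffusers import IFPipeline

    pipe = IFPipeline.from_pretrained(
        "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
    )
    pipe.enable_model_cpu_offload()
    prompt_embeds, negative_embeds = pipe.encode_prompt("anime turtle")
    return pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds).images[0]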
| 120 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCamelCase__ ( ) -> Any:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__snake_case = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCamelCase__ ( ) -> Any:
assert _test_patching.open is open
__snake_case = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , snake_case_ ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> List[str]:
# pandas.read_csv is not present in _test_patching
__snake_case = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , snake_case_ ):
pass
def lowerCamelCase__ ( ) -> Union[str, Any]:
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
__snake_case = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , snake_case_ ) is None
with patch_submodule(_test_patching , '''len''' , snake_case_ ):
        assert _test_patching.len is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.len is len
def lowerCamelCase__ ( ) -> Union[str, Any]:
__snake_case = '''__test_patch_submodule_start_and_stop_mock__'''
__snake_case = patch_submodule(_test_patching , '''open''' , snake_case_ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> Optional[int]:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__snake_case = '''__test_patch_submodule_successive_join__'''
__snake_case = '''__test_patch_submodule_successive_dirname__'''
__snake_case = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ):
with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ):
with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCamelCase__ ( ) -> Tuple:
__snake_case = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , snake_case_ ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , snake_case_ ):
pass
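def _example_patch_submodule_usage():
    # Illustrative sketch only: the core pattern the tests above exercise -
    # patching a dotted attribute path inside a target module, with automatic
    # restoration when the context manager exits.
    def fake_join(*parts):
        return "/".join(parts)

    with patch_submodule(_test_patching, "os.path.join", fake_join):
        assert _test_patching.os.path.join("a", "b") == "a/b"
    # Outside the context manager the original os.path.join is back.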
| 24 | 0 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # flag names assumed to follow the standard transformers ModelTesterMixin conventions
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
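# A hedged usage sketch of the model exercised above (standard transformers
# API, shown only for orientation):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
#     model = DistilBertModel.from_pretrained("distilbert-base-uncased")
#     inputs = tokenizer("Hello, world!", return_tensors="pt")
#     with torch.no_grad():
#         hidden = model(**inputs).last_hidden_state  # (1, seq_len, 768)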
| 147 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."})
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def handle_metrics(split, metrics, output_dir):
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16, )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )
    # use task specific params
    use_task_specific_params(model, data_args.task)
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)
    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)
        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))
    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))
    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
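# A hedged invocation sketch. The script file name and data paths are
# placeholders; the flags come from the dataclasses above plus the standard
# Seq2SeqTrainingArguments:
#
#     python finetune_trainer.py \
#         --model_name_or_path t5-small \
#         --data_dir ./cnn_dm \
#         --output_dir ./output \
#         --do_train --do_eval --predict_with_generate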
| 24 | 0 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average, )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    # flag names assumed to follow the standard transformers ModelTesterMixin conventions
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass
    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")
            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ])
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
            out_len = len(outputs)
            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len, correct_outlen)
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )
        # Check attention is always last and order is fine
        inputs_dict["output_attentions"] = True
        inputs_dict["output_hidden_states"] = True
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        self.assertEqual(out_len + 2, len(outputs))
        self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
        self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device)
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
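# Hedged usage sketch: turning the sampled forecasts above into point and
# quantile predictions (standard torch ops on `outputs.sequences`, shown for
# orientation only):
#
#     samples = outputs.sequences                # (batch, num_samples, prediction_length)
#     point_forecast = samples.mean(dim=1)       # per-step mean forecast
#     p10 = samples.quantile(0.1, dim=1)         # lower band
#     p90 = samples.quantile(0.9, dim=1)         # upper band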
| 78 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
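# Hedged sanity check (added for illustration, not part of the original
# script): a 360-degree arc of radius r equals the full circumference 2*pi*r.
if __name__ == "__main__":
    assert abs(arc_length(360, 10) - 2 * pi * 10) < 1e-9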
| 24 | 0 |
from math import factorial
def combinations(n: int, k: int) -> int:
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
F"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'''If a class of 40 students must be arranged into groups of''',
F"4 for group projects, there are {combinations(40, 4)} ways",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
F"are {combinations(10, 3)} ways that first, second and",
'''third place can be awarded.''',
)
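# Hedged sanity checks (added for illustration): binomial coefficients are
# symmetric, C(n, k) == C(n, n - k), and satisfy Pascal's rule
# C(n, k) == C(n - 1, k - 1) + C(n - 1, k).
if __name__ == "__main__":
    assert combinations(52, 5) == combinations(52, 47)
    assert combinations(10, 3) == combinations(9, 2) + combinations(9, 3)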
| 345 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
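# Hedged usage sketch (for orientation; not part of the original module):
#
#     config = ViTMSNConfig()                                      # defaults above
#     num_patches = (config.image_size // config.patch_size) ** 2  # 14 * 14 = 196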
| 24 | 0 |
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]
def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])
def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ])
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ])
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case(text: str) -> str:
    return to_simple_case(text)
def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")
def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
if __name__ == "__main__":
__import__("doctest").testmod()
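# Hedged usage demo (the public function names above are reconstructed from
# the internal call sites, so treat them as assumptions):
if __name__ == "__main__":
    print(to_pascal_case("hello world"))        # HelloWorld
    print(to_camel_case("hello world"))         # helloWorld
    print(to_snake_case("hello world", True))   # HELLO_WORLD
    print(to_kebab_case("hello world", False))  # hello-world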
| 57 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
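# The pattern exercised above generalizes to any diffusers pipeline: load once,
# then swap the sampler while keeping its configuration. A hedged sketch:
#
#     pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(checkpoint, provider="CPUExecutionProvider")
#     pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)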
| 24 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
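# Hedged usage sketch (for orientation; not part of the original module):
#
#     config = CanineConfig()   # character-level model, so no WordPiece vocab
#     # After downsampling, a 2048-character input is reduced to
#     # 2048 / config.downsampling_rate = 512 positions for the deep stack.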
| 30 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]
        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task, ), )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            return len(self.features)
        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]
        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )
            self.dataset = tf.data.Dataset.from_generator(
                gen, (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ), (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ), )
        def get_dataset(self):
            return self.dataset
        def __len__(self):
            return len(self.features)
        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")
    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")
    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]
    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, padding="max_length", truncation=True, return_overflowing_tokens=True, )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")
    return features
hans_tasks_num_labels = {
    "hans": 3,
}
hans_processors = {
    "hans": HansProcessor,
}
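# Hedged usage sketch (tokenizer and data paths are placeholders; the API is
# the one defined above):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     train = HansDataset("./hans_data", tokenizer, task="hans", max_seq_length=128)
#     print(hans_tasks_num_labels["hans"], train.get_labels())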
| 24 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 307 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
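# Minimal usage sketch (added for illustration; the checkpoint name 'BAAI/AltCLIP'
# is an assumption based on the tokenizer/image-processor pairing above):
#
#     from PIL import Image
#     processor = AltCLIPProcessor.from_pretrained('BAAI/AltCLIP')
#     batch = processor(text=['a photo of a cat'], images=Image.open('cat.png'), return_tensors='pt')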
| 24 | 0 |
def is_automorphic_number(number: int) -> bool:
    """Return True if ``number`` is automorphic, i.e. its square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
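# Illustrative check (added sketch, not in the original file): 5*5 = 25 and
# 76*76 = 5776 both end in the original number, so they are automorphic; 7*7 = 49 is not.
def _automorphic_demo() -> None:
    assert all(is_automorphic_number(n) for n in (0, 1, 5, 6, 25, 76, 376))
    assert not is_automorphic_number(7)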
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 308 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfArgumentParserTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + '\n')

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, '')

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + '\n')

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY='error')
    def test_env_override(self):
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger('transformers.models.bart.tokenization_bart')

        env_level_str = os.getenv('TRANSFORMERS_VERBOSITY', None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ['TRANSFORMERS_VERBOSITY'] = ''
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY='super-error')
    def test_env_invalid_override(self):
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger('transformers.models.bart.tokenization_bart')
        self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error', cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger('transformers.models.bart.tokenization_bart')
        msg = 'Testing 1, 2, 3'

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1'):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, '')

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=''):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + '\n')
def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
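# Hedged usage sketch (added): the helpers exercised by the tests above are the
# public way to tune transformers' log output and progress bars in user code.
def _verbosity_demo() -> None:
    logging.set_verbosity_info()   # show info-level messages
    disable_progress_bar()         # silence tqdm bars, e.g. on CI
    enable_progress_bar()
    logging.set_verbosity_warning()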
| 24 | 0 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if you leave it or pass -1 the count
    # will be random from 10 to 10000
    def fill_graph(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has at most 102 edges (floor(random() * 102) + 1)
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
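# Minimal usage sketch for DirectedGraph (added for illustration; node labels
# are arbitrary and edge weights default to 1):
def _directed_graph_demo() -> None:
    g = DirectedGraph()
    g.add_pair(0, 1)
    g.add_pair(1, 2)
    g.add_pair(2, 0)  # closes the cycle 0 -> 1 -> 2 -> 0
    assert g.has_cycle()
    assert g.dfs(0, 2) == [0, 1, 2]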
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if you leave it or pass -1 the count
    # will be random from 10 to 10000
    def fill_graph(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has at most 102 edges (floor(random() * 102) + 1)
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
| 145 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '<d>',
            '</d>',
            '<s>',
            '</s>',
            '</_>',
            '<unk>',
            '<pad>',
            '</n>',
            '我',
            '是',
            'C',
            'P',
            'M',
            'A',
            'n',
            't',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b')
        texts = '今天天气真好!'
        jieba_tokens = ['今天', '天气', '真', '好', '!']
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = '今天天气真好!'
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 24 | 0 |
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
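# Quick sanity checks (added sketch; binary_search assumes the list is already
# sorted in ascending order):
def _binary_search_demo() -> None:
    assert binary_search([1, 3, 5, 7, 9], 7)
    assert not binary_search([1, 3, 5, 7, 9], 4)
    assert not binary_search([], 1)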
| 36 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone='resnet50',
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        # config_class choice below is an assumption; the tester above only builds TimmBackboneConfig
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = 'resnet18'
        transformers_checkpoint = 'microsoft/resnet-18'
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def a (self : str ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a (self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
def a (self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
__snake_case = self.has_attentions
# no need to test all models as different heads yield the same functionality
__snake_case = self.all_model_classes[0]
__snake_case = model_class(a__ )
model.to(a__ )
__snake_case = self._prepare_for_class(a__ , a__ )
__snake_case = model(**a__ )
__snake_case = outputs[0][-1]
# Encoder-/Decoder-only models
__snake_case = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__snake_case = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=a__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__snake_case = copy.deepcopy(a__ )
__snake_case = None
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__snake_case = copy.deepcopy(a__ )
__snake_case = False
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
| 24 | 0 |
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date, using the Doomsday algorithm."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # a year is NOT a leap year if it is not divisible by 4, or if it is a
    # century year not divisible by 400 (the original `== 0` inverted this test)
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
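# Spot checks (added sketch; both dates are known Saturdays):
def _doomsday_demo() -> None:
    assert get_week_day(2020, 10, 24) == "Saturday"
    assert get_week_day(2000, 1, 1) == "Saturday"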
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 75 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
snake_case_ = '\nimport os\n'
snake_case_ = '\ndef foo():\n import os\n return False\n'
snake_case_ = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n'
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('case', CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, 'test_file.py')
    with open(tmp_file_path, 'w') as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 24 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    model_type = 'poolformer'

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act='gelu',
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
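# Usage sketch (added): the default arguments encode the PoolFormer-S12 layout,
# so an explicit call like the one below is equivalent to PoolFormerConfig().
def _poolformer_config_demo() -> PoolFormerConfig:
    return PoolFormerConfig(num_encoder_blocks=4, depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512])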
| 120 |
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b'Hello server!')

    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print('Successfully received the file')
    sock.close()
    print('Connection closed')


if __name__ == "__main__":
    main()
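# A matching server sketch (added for illustration; not part of the original
# client -- it assumes the same hostname/port and streams `send_file.txt`):
def _server_demo() -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open('send_file.txt', 'rb') as in_file:
        data = in_file.read(1024)
        while data:
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    server.close()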
| 24 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip_2'] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 147 |
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
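# Example run (added; this is the classic longest-increasing-subsequence input):
def _lis_demo() -> None:
    assert longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) == [10, 22, 33, 41, 60, 80]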
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 24 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ['▁This', '▁is', '▁a', '▁t', 'est']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['monolingual_vocab_file'])
        with open(self.monolingual_vocab_file, 'w', encoding='utf-8') as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'This is a là test'
        output_text = 'This is a<unk><unk> test'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = 'This is a là test'
        bpe_tokens = '▁This ▁is ▁a ▁l à ▁t est'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 78 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
def __init__(self : Any , a__ : Union[str, Any] , a__ : int=13 , a__ : int=7 , a__ : Optional[Any]=True , a__ : Optional[int]=True , a__ : Any=True , a__ : str=True , a__ : List[Any]=99 , a__ : Any=24 , a__ : List[str]=2 , a__ : Optional[int]=6 , a__ : int=37 , a__ : List[str]="gelu" , a__ : List[Any]=0.1 , a__ : Optional[int]=0.1 , a__ : Union[str, Any]=512 , a__ : List[str]=16 , a__ : Optional[int]=2 , a__ : Union[str, Any]=0.0_2 , a__ : str=3 , a__ : Optional[Any]=None , a__ : Any=1000 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = scope
__snake_case = range_bbox
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case = bbox[i, j, 3]
__snake_case = bbox[i, j, 1]
__snake_case = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case = bbox[i, j, 2]
__snake_case = bbox[i, j, 0]
__snake_case = t
__snake_case = None
if self.use_input_mask:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def a (self : List[str] ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a (self : List[Any] , a__ : List[Any] , a__ : Optional[Any] , a__ : List[str] , a__ : int , a__ : Optional[int] , a__ : str , a__ : Optional[int] , ):
"""simple docstring"""
__snake_case = LiltModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ )
__snake_case = model(a__ , bbox=a__ , token_type_ids=a__ )
__snake_case = model(a__ , bbox=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a (self : Any , a__ : Tuple , a__ : Dict , a__ : Optional[int] , a__ : Dict , a__ : Union[str, Any] , a__ : str , a__ : Tuple , ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = LiltForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(
a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a (self : int , a__ : Optional[Any] , a__ : int , a__ : int , a__ : Optional[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : str , ):
"""simple docstring"""
__snake_case = LiltForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(
a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': LiltModel,
            'question-answering': LiltForQuestionAnswering,
            'text-classification': LiltForSequenceClassification,
            'token-classification': LiltForTokenClassification,
            'zero-shot': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = LiltModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , hidden_size=37 )
def a (self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def a (self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@slow
def a (self : Optional[int] ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = LiltModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
def a (self : Tuple ):
"""simple docstring"""
__snake_case = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a__ )
__snake_case = torch.tensor([[1, 2]] , device=a__ )
__snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a__ )
# forward pass
with torch.no_grad():
__snake_case = model(input_ids=a__ , bbox=a__ )
__snake_case = torch.Size([1, 2, 768] )
__snake_case = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=a__ , )
self.assertTrue(outputs.last_hidden_state.shape , a__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a__ , atol=1E-3 ) )
| 24 | 0 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abca = [0, 25, 50]
    abcb = [25, 50, 75]
    young = fuzz.membership.trimf(X, abca)
    middle_aged = fuzz.membership.trimf(X, abcb)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('Young')
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('Middle aged')
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('union')
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('intersection')
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('complement_a')
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('difference a/b')
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('alg_sum')
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('alg_product')
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('bdd_sum')
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title('bdd_difference')
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 345 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def load_image(_):
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
snake_case_ = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            'document-question-answering', model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, '')))
        question = 'What is the placebo?'
        examples = [
            {
                'image': load_image(image),
                'question': question,
            },
            {
                'image': image,
                'question': question,
            },
            {
                'image': image,
                'question': question,
                'word_boxes': word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {'score': ANY(float), 'answer': ANY(str), 'start': ANY(int), 'end': ANY(int)},
                    {'score': ANY(float), 'answer': ANY(str), 'start': ANY(int), 'end': ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
@require_detectrona
@require_pytesseract
def a (self : Dict ):
"""simple docstring"""
__snake_case = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__snake_case = INVOICE_URL
__snake_case = '''How many cats are there?'''
__snake_case = [
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(a__ , [] )
        # We can optionally pass the words and bounding boxes directly
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = []
__snake_case = []
__snake_case = dqa_pipeline(image=a__ , question=a__ , words=a__ , boxes=a__ , top_k=2 )
self.assertEqual(a__ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def a (self : str ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Tuple ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Dict ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def a (self : Tuple ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def a (self : List[str] ):
"""simple docstring"""
pass
| 24 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
A : Tuple = logging.get_logger(__name__)
class _UpperCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
def __init__( self , *__a , **__a ):
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , a__ , )
super().__init__(*a__ , **a__ )
| 57 |
from __future__ import annotations
def generate_sum_of_subsets_soln ( nums : list[int] , max_sum : int ) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result
def create_state_space_tree ( nums : list[int] , max_sum : int , num_index : int , path : list[int] , result : list[list[int]] , remaining_nums_sum : int , ) -> None:
    # Prune this branch if the current path already exceeds max_sum, or if even
    # adding every remaining number could not reach max_sum.
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 24 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset ( ):
    '''simple docstring'''
    arr = [randint(-1_000 , 1_000 ) for i in range(10 )]
    r = randint(-5_000 , 5_000 )
    return (arr, r)
dataset = make_dataset()
def triplet_sum1 ( arr: list[int] , target: int ):
    '''simple docstring'''
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def triplet_sum2 ( arr: list[int] , target: int ):
    '''simple docstring'''
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left , right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times ( ):
    '''simple docstring'''
    setup_code = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
    test_code_1 = '''
triplet_sum1(*dataset)
'''
    test_code_2 = '''
triplet_sum2(*dataset)
'''
    times_1 = repeat(setup=setup_code , stmt=test_code_1 , repeat=5 , number=10_000 )
    times_2 = repeat(setup=setup_code , stmt=test_code_2 , repeat=5 , number=10_000 )
    return (min(times_1 ), min(times_2 ))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(f"The time for naive implementation is {times[0]}.")
print(f"The time for optimized implementation is {times[1]}.")
| 30 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Any , a__ : List[Any] , a__ : Dict=13 , a__ : str=32 , a__ : Tuple=3 , a__ : Optional[Any]=4 , a__ : Optional[int]=[10, 20, 30, 40] , a__ : List[Any]=[2, 2, 3, 2] , a__ : List[Any]=True , a__ : int=True , a__ : List[Any]=37 , a__ : Any="gelu" , a__ : int=10 , a__ : Dict=0.0_2 , a__ : Dict=["stage2", "stage3", "stage4"] , a__ : Tuple=[2, 3, 4] , a__ : List[str]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = num_stages
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = is_training
__snake_case = use_labels
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_labels
__snake_case = initializer_range
__snake_case = out_features
__snake_case = out_indices
__snake_case = scope
def a (self : Dict ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def a (self : List[str] ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a (self : str , a__ : Union[str, Any] , a__ : List[str] , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a (self : Optional[Any] , a__ : List[Any] , a__ : str , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification(a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a (self : Tuple , a__ : List[Any] , a__ : List[str] , a__ : List[str] ):
"""simple docstring"""
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__snake_case = None
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Dict = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
A_ : Optional[Any] = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
A_ : Dict = True
A_ : Optional[Any] = False
A_ : int = False
A_ : int = False
A_ : List[str] = False
def a (self : List[str] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def a (self : Tuple ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a (self : str ):
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def a (self : Dict ):
"""simple docstring"""
def check_hidden_states_output(a__ : List[str] , a__ : str , a__ : Tuple ):
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(a__ , a__ ) )
__snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def a (self : Any ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = ConvNextModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def lowerCamelCase__ ( ) -> List[str]:
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def a (self : Tuple ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
__snake_case = model(**a__ )
# verify the logits
__snake_case = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
__snake_case = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , _UpperCAmelCase ):
A_ : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else ()
A_ : List[Any] = ConvNextConfig
A_ : Optional[Any] = False
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
| 24 | 0 |
def solution ( n = 100 ) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
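# Worked example for n = 10, as in the Project Euler 6 statement: the sum of the
# squares is 385, the square of the sum is 55**2 = 3025, and the difference is 2640.
assert solution(10) == 2640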
if __name__ == "__main__":
print(f'''{solution() = }''')
| 307 |
def solution ( ) -> int:
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
        for b in range(a , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F'{solution() = }')
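    # The unique triplet with a + b + c = 1000 and a^2 + b^2 = c^2 is
    # (200, 375, 425), so the printed product is 31875000.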
| 24 | 0 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = AutoencoderKL
_UpperCamelCase : List[Any] = 'sample'
_UpperCamelCase : List[Any] = 1E-2
@property
def __a ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase : Tuple = 4
lowercase : str = 3
lowercase : Optional[Any] = (32, 32)
lowercase : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(a__ )
return {"sample": image}
@property
def __a ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (3, 32, 32)
@property
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return (3, 32, 32)
def __a ( self : int ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
lowercase : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def __a ( self : Tuple ) -> int:
"""simple docstring"""
pass
def __a ( self : int ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def __a ( self : int ) -> Dict:
"""simple docstring"""
lowercase , lowercase : List[Any] = self.prepare_init_args_and_inputs_for_common()
lowercase : Union[str, Any] = self.model_class(**a__ )
model.to(a__ )
assert not model.is_gradient_checkpointing and model.training
lowercase : List[Any] = model(**a__ ).sample
        # Run the backward pass. For simplicity we skip a real loss and instead
        # backprop on the mean difference from random targets.
model.zero_grad()
lowercase : Tuple = torch.randn_like(a__ )
lowercase : List[Any] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
lowercase : str = self.model_class(**a__ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(a__ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
lowercase : Tuple = model_a(**a__ ).sample
        # Run the backward pass. For simplicity we skip a real loss and instead
        # backprop on the mean difference from random targets.
model_a.zero_grad()
lowercase : Any = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
lowercase : Union[str, Any] = dict(model.named_parameters() )
lowercase : int = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
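        # Note: gradient checkpointing trades compute for memory by recomputing
        # activations during the backward pass instead of caching them, so the loss
        # and per-parameter gradients should match the non-checkpointed model up to
        # numerical tolerance, which is what the assertions above verify.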
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowercase , lowercase : str = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(a__ )
lowercase : List[Any] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __a ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowercase : Any = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
lowercase : List[Any] = model.to(a__ )
model.eval()
if torch_device == "mps":
lowercase : Union[str, Any] = torch.manual_seed(0 )
else:
lowercase : List[str] = torch.Generator(device=a__ ).manual_seed(0 )
lowercase : List[str] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase : Union[str, Any] = image.to(a__ )
with torch.no_grad():
lowercase : Union[str, Any] = model(a__ , sample_posterior=a__ , generator=a__ ).sample
lowercase : Dict = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
lowercase : str = torch.tensor(
[
-4.0_078E-01,
-3.8_323E-04,
-1.2_681E-01,
-1.1_462E-01,
2.0_095E-01,
1.0_893E-01,
-8.8_247E-02,
-3.0_361E-01,
-9.8_644E-03,
] )
elif torch_device == "cpu":
lowercase : int = torch.tensor(
[-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
else:
lowercase : int = torch.tensor(
[-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
self.assertTrue(torch_all_close(a__ , a__ , rtol=1E-2 ) )
@slow
class _A ( unittest.TestCase ):
def __a ( self : Tuple , _A : Optional[Any] , _A : List[str] ) -> List[str]:
"""simple docstring"""
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(a__ ) for s in shape] )}.npy"""
def __a ( self : List[Any] ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Optional[int] , _A : Optional[Any]=0 , _A : List[Any]=(4, 3, 512, 512) , _A : Optional[Any]=False ) -> List[str]:
"""simple docstring"""
lowercase : Tuple = torch.floataa if fpaa else torch.floataa
lowercase : int = torch.from_numpy(load_hf_numpy(self.get_file_format(a__ , a__ ) ) ).to(a__ ).to(a__ )
return image
def __a ( self : Optional[Any] , _A : Dict="CompVis/stable-diffusion-v1-4" , _A : List[Any]=False ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = '''fp16''' if fpaa else None
lowercase : Tuple = torch.floataa if fpaa else torch.floataa
lowercase : Union[str, Any] = AutoencoderKL.from_pretrained(
a__ , subfolder='''vae''' , torch_dtype=a__ , revision=a__ , )
model.to(a__ ).eval()
return model
def __a ( self : Union[str, Any] , _A : int=0 ) -> Dict:
"""simple docstring"""
if torch_device == "mps":
return torch.manual_seed(a__ )
return torch.Generator(device=a__ ).manual_seed(a__ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def __a ( self : Optional[int] , _A : int , _A : Union[str, Any] , _A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase : List[str] = self.get_sd_vae_model()
lowercase : List[str] = self.get_sd_image(a__ )
lowercase : str = self.get_generator(a__ )
with torch.no_grad():
lowercase : str = model(a__ , generator=a__ , sample_posterior=a__ ).sample
assert sample.shape == image.shape
lowercase : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase : List[Any] = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(a__ , a__ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
] )
@require_torch_gpu
def __a ( self : Union[str, Any] , _A : Union[str, Any] , _A : str ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = self.get_sd_vae_model(fpaa=a__ )
lowercase : Any = self.get_sd_image(a__ , fpaa=a__ )
lowercase : List[Any] = self.get_generator(a__ )
with torch.no_grad():
lowercase : Optional[int] = model(a__ , generator=a__ , sample_posterior=a__ ).sample
assert sample.shape == image.shape
lowercase : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase : Optional[Any] = torch.tensor(a__ )
assert torch_all_close(a__ , a__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def __a ( self : Optional[Any] , _A : str , _A : Tuple , _A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Tuple = self.get_sd_vae_model()
lowercase : List[Any] = self.get_sd_image(a__ )
with torch.no_grad():
lowercase : Dict = model(a__ ).sample
assert sample.shape == image.shape
lowercase : Dict = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase : str = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(a__ , a__ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
[37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
# fmt: on
] )
@require_torch_gpu
def __a ( self : str , _A : Optional[int] , _A : Any ) -> int:
"""simple docstring"""
lowercase : Dict = self.get_sd_vae_model()
lowercase : Union[str, Any] = self.get_sd_image(a__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase : List[str] = model.decode(a__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase : List[str] = sample[-1, -2:, :2, -2:].flatten().cpu()
lowercase : int = torch.tensor(a__ )
assert torch_all_close(a__ , a__ , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
[16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
# fmt: on
] )
@require_torch_gpu
def __a ( self : List[Any] , _A : Any , _A : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = self.get_sd_vae_model(fpaa=a__ )
lowercase : Tuple = self.get_sd_image(a__ , shape=(3, 4, 64, 64) , fpaa=a__ )
with torch.no_grad():
lowercase : Union[str, Any] = model.decode(a__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase : Any = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase : Dict = torch.tensor(a__ )
assert torch_all_close(a__ , a__ , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def __a ( self : Tuple , _A : Any ) -> int:
"""simple docstring"""
lowercase : Tuple = self.get_sd_vae_model(fpaa=a__ )
lowercase : Optional[int] = self.get_sd_image(a__ , shape=(3, 4, 64, 64) , fpaa=a__ )
with torch.no_grad():
lowercase : List[Any] = model.decode(a__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase : str = model.decode(a__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(a__ , a__ , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def __a ( self : int , _A : Optional[int] ) -> str:
"""simple docstring"""
lowercase : List[str] = self.get_sd_vae_model()
lowercase : str = self.get_sd_image(a__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase : List[Any] = model.decode(a__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase : List[Any] = model.decode(a__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(a__ , a__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
[47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
# fmt: on
] )
def __a ( self : str , _A : Optional[Any] , _A : List[str] ) -> List[str]:
"""simple docstring"""
lowercase : Optional[Any] = self.get_sd_vae_model()
lowercase : str = self.get_sd_image(a__ )
lowercase : Dict = self.get_generator(a__ )
with torch.no_grad():
lowercase : int = model.encode(a__ ).latent_dist
lowercase : Dict = dist.sample(generator=a__ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
lowercase : List[Any] = sample[0, -1, -3:, -3:].flatten().cpu()
lowercase : Optional[Any] = torch.tensor(a__ )
lowercase : List[str] = 3E-3 if torch_device != '''mps''' else 1E-2
assert torch_all_close(a__ , a__ , atol=a__ ) | 308 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : List[Any] = BartphoTokenizer
A_ : List[str] = False
A_ : Optional[Any] = True
def a (self : Tuple ):
"""simple docstring"""
super().setUp()
__snake_case = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
__snake_case = dict(zip(a__ , range(len(a__ ) ) ) )
__snake_case = {'''unk_token''': '''<unk>'''}
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
__snake_case = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def a (self : str , **a__ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **a__ )
def a (self : str , a__ : Any ):
"""simple docstring"""
__snake_case = '''This is a là test'''
__snake_case = '''This is a<unk><unk> test'''
return input_text, output_text
def a (self : Dict ):
"""simple docstring"""
__snake_case = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map )
__snake_case = '''This is a là test'''
__snake_case = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
__snake_case = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
__snake_case = tokens + [tokenizer.unk_token]
__snake_case = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
| 24 | 0 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict", [
SplitDict(),
SplitDict({"train": SplitInfo(name="train", num_bytes=1_337, num_examples=42, dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train", num_bytes=1_337, num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
], )
def __UpperCAmelCase ( split_dict: SplitDict ):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict ) == len(split_dict_yaml_list )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info", [SplitInfo(), SplitInfo(dataset_name=snake_case_ ), SplitInfo(dataset_name="my_dataset" )] )
def __UpperCAmelCase ( split_info: SplitInfo ):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
def lowerCamelCase__ ( number : int ) -> int:
    if not isinstance(number , int ) or number < 0:
        raise ValueError('''Input must be a non-negative integer''' )
    count = 0
    while number:
        # Kernighan's trick: `number &= number - 1` clears the lowest set bit, so the
        # loop runs once per set bit rather than once per bit position (not 32 times).
        number &= number - 1
        count += 1
    return count
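# Quick sanity checks for the popcount above: 13 = 0b1101 has three set bits,
# and 255 = 0b11111111 has eight.
assert lowerCamelCase__(13) == 3
assert lowerCamelCase__(255) == 8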
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 0 |
def topological_sort ( graph ):
    '''simple docstring'''
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print("Cycle exists" )
    else:
        print(topo )
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
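# For the sample graph above, vertices are dequeued in BFS order, so the
# printed topological ordering is [0, 1, 2, 3, 4, 5].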
| 36 |
from math import loga
def lowerCamelCase__ ( a : int ) -> int:
    if a < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(a , float ):
        raise TypeError('''Input value must be a \'int\' type''' )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=32, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase=[10, 20, 30, 40], lowerCAmelCase=[2, 2, 3, 2], lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=10, lowerCAmelCase=0.0_2, lowerCAmelCase=["stage2", "stage3", "stage4"], lowerCAmelCase=[2, 3, 4], lowerCAmelCase=None, ):
"""simple docstring"""
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =image_size
lowerCamelCase_ =num_channels
lowerCamelCase_ =num_stages
lowerCamelCase_ =hidden_sizes
lowerCamelCase_ =depths
lowerCamelCase_ =is_training
lowerCamelCase_ =use_labels
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_act
lowerCamelCase_ =num_labels
lowerCamelCase_ =initializer_range
lowerCamelCase_ =out_features
lowerCamelCase_ =out_indices
lowerCamelCase_ =scope
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ =None
if self.use_labels:
lowerCamelCase_ =ids_tensor([self.batch_size], self.num_labels )
lowerCamelCase_ =self.get_config()
return config, pixel_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a__, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =ConvNextModel(config=a__ )
model.to(a__ )
model.eval()
lowerCamelCase_ =model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =ConvNextForImageClassification(a__ )
model.to(a__ )
model.eval()
lowerCamelCase_ =model(a__, labels=a__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
lowerCamelCase_ =model(a__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase_ =None
lowerCamelCase_ =ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
lowerCamelCase_ =model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.prepare_config_and_inputs()
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =config_and_inputs
lowerCamelCase_ ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowercase : Dict =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
lowercase : Optional[Any] =(
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
lowercase : Dict =True
lowercase : Optional[Any] =False
lowercase : int =False
lowercase : int =False
lowercase : List[str] =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ConvNextModelTester(self )
lowerCamelCase_ =ConfigTester(self, config_class=a__, has_text_modality=a__, hidden_size=37 )
def lowercase__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ =model_class(a__ )
lowerCamelCase_ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ =[*signature.parameters.keys()]
lowerCamelCase_ =['''pixel_values''']
self.assertListEqual(arg_names[:1], a__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def lowercase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
lowerCamelCase_ =model(**self._prepare_for_class(a__, a__ ) )
lowerCamelCase_ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ =self.model_tester.num_stages
self.assertEqual(len(a__ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ =True
check_hidden_states_output(a__, a__, a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ =True
check_hidden_states_output(a__, a__, a__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ =ConvNextModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def a_ ( ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ )
lowerCamelCase_ =self.default_image_processor
lowerCamelCase_ =prepare_img()
lowerCamelCase_ =image_processor(images=a__, return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
lowerCamelCase_ =model(**a__ )
# verify the logits
lowerCamelCase_ =torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, a__ )
lowerCamelCase_ =torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], a__, atol=1e-4 ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase , _UpperCAmelCase ):
lowercase : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
lowercase : List[Any] =ConvNextConfig
lowercase : Optional[Any] =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ConvNextModelTester(self )
| 75 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
snake_case_ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__(self : Optional[int] , *a__ : Any , **a__ : Dict ):
"""simple docstring"""
super().__init__(*a__ , **a__ )
requires_backends(self , '''vision''' )
self.check_model_type(a__ )
def __call__(self : Optional[int] , a__ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **a__ : Tuple ):
"""simple docstring"""
return super().__call__(a__ , **a__ )
def a (self : Dict , **a__ : Any ):
"""simple docstring"""
return {}, {}, {}
def a (self : List[str] , a__ : Any ):
"""simple docstring"""
__snake_case = load_image(a__ )
__snake_case = image.size
__snake_case = self.image_processor(images=a__ , return_tensors=self.framework )
return model_inputs
def a (self : int , a__ : List[Any] ):
"""simple docstring"""
__snake_case = self.model(**a__ )
return model_outputs
def a (self : int , a__ : str ):
"""simple docstring"""
__snake_case = model_outputs.predicted_depth
__snake_case = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=a__ )
__snake_case = prediction.squeeze().cpu().numpy()
__snake_case = (output * 255 / np.max(a__ )).astype('''uint8''' )
__snake_case = Image.fromarray(a__ )
__snake_case = {}
__snake_case = predicted_depth
__snake_case = depth
return output_dict
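# Minimal usage sketch for the pipeline above (the checkpoint id is illustrative;
# any depth-estimation model on the Hub, e.g. "Intel/dpt-large", should work):
#
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   outputs = depth_estimator("path/to/image.png")
#   # upstream, outputs["predicted_depth"] is the raw tensor and outputs["depth"]
#   # is a PIL visualization of it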
| 24 | 0 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 120 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCamelCase__ ( ) -> Any:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__snake_case = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
        # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCamelCase__ ( ) -> Any:
assert _test_patching.open is open
__snake_case = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , snake_case_ ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> List[str]:
# pandas.read_csv is not present in _test_patching
__snake_case = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , snake_case_ ):
pass
def lowerCamelCase__ ( ) -> Union[str, Any]:
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
__snake_case = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , snake_case_ ) is None
with patch_submodule(_test_patching , '''len''' , snake_case_ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCamelCase__ ( ) -> Union[str, Any]:
__snake_case = '''__test_patch_submodule_start_and_stop_mock__'''
__snake_case = patch_submodule(_test_patching , '''open''' , snake_case_ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> Optional[int]:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__snake_case = '''__test_patch_submodule_successive_join__'''
__snake_case = '''__test_patch_submodule_successive_dirname__'''
__snake_case = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ):
with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , snake_case_ ):
with patch_submodule(_test_patching , '''os.path.join''' , snake_case_ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , snake_case_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCamelCase__ ( ) -> Tuple:
__snake_case = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , snake_case_ ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , snake_case_ ):
pass
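    # Takeaway from these tests: patch_submodule swaps any attribute reachable from a
    # module's globals (dotted paths included) by wrapping intermediate modules in
    # _PatchedModuleObj, and restores the originals once the patch exits.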
| 24 | 0 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _a ( _UpperCAmelCase , unittest.TestCase ):
A = CpmAntTokenizer
A = False
def __snake_case (self ) -> Tuple:
super().setUp()
UpperCAmelCase_: Dict = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
UpperCAmelCase_: int = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __snake_case (self ) -> int:
UpperCAmelCase_: List[Any] = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
UpperCAmelCase_: Dict = """今天天气真好!"""
UpperCAmelCase_: Tuple = ["""今天""", """天气""", """真""", """好""", """!"""]
UpperCAmelCase_: Optional[int] = tokenizer.tokenize(a__ )
self.assertListEqual(a__, a__ )
UpperCAmelCase_: str = """今天天气真好!"""
UpperCAmelCase_: List[Any] = [tokenizer.bos_token] + tokens
UpperCAmelCase_: Tuple = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ), a__ )
UpperCAmelCase_: Any = tokenizer.decode(a__ )
self.assertEqual(a__, a__ )
| 147 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
snake_case_ = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
A_ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the encoder.'} )
A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A_ : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
A_ : Optional[str] = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
A_ : Optional[int] = field(
default=1_024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
A_ : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Source language id for translation.'} )
A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Target language id for translation.'} )
A_ : Optional[int] = field(default=_UpperCAmelCase , metadata={'help': '# num_beams to use for evaluation.'} )
A_ : bool = field(
default=_UpperCAmelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Dict ) -> str:
logger.info(f"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(f""" {key} = {metrics[key]}""" )
save_json(snake_case_ , os.path.join(snake_case_ , f"""{split}_results.json""" ) )
def lowerCamelCase__ ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses()
check_output_dir(snake_case_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__snake_case = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
assert hasattr(snake_case_ , snake_case_ ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
__snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
__snake_case = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case_ , snake_case_ ):
__snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
__snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
__snake_case = SeqaSeqDataset
# Get datasets
__snake_case = (
dataset_class(
snake_case_ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
__snake_case = (
dataset_class(
snake_case_ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
__snake_case = (
dataset_class(
snake_case_ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
__snake_case = (
build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None
)
__snake_case = SeqaSeqTrainer(
model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator(
snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , )
__snake_case = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
__snake_case = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
__snake_case = train_result.metrics
__snake_case = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__snake_case = trainer.evaluate(metric_key_prefix='''val''' )
__snake_case = data_args.n_val
__snake_case = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
__snake_case = trainer.predict(test_dataset=snake_case_ , metric_key_prefix='''test''' )
__snake_case = test_output.metrics
__snake_case = data_args.n_test
if trainer.is_world_process_zero():
__snake_case = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.predict_with_generate:
__snake_case = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
__snake_case = lmap(str.strip , snake_case_ )
write_txt_file(snake_case_ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(snake_case_ , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def lowerCamelCase__ ( snake_case_ : Optional[Any] ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
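# Example invocation (illustrative -- the data path and checkpoint are
# placeholders, while the flag names mirror the dataclass fields declared above):
#   python finetune_trainer.py \
#     --model_name_or_path sshleifer/distilbart-xsum-12-3 \
#     --data_dir ./xsum \
#     --output_dir ./output \
#     --do_train --do_eval --predict_with_generate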
| 24 | 0 |
"""simple docstring"""
import requests
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
UpperCAmelCase = {'Content-Type': 'application/json'}
UpperCAmelCase = requests.post(snake_case_ , json={'text': message_body} , headers=snake_case_ )
if response.status_code != 200:
UpperCAmelCase = (
'Request to slack returned an error '
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(snake_case_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 78 |
from math import pi
def lowerCamelCase__ ( snake_case_ : int , snake_case_ : int ) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
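    # Added sanity check (illustrative): a 90-degree arc of a radius-10 circle
    # spans a quarter of the circumference, i.e. (90 / 360) * 2 * pi * 10 = 5 * pi.
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-9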
| 24 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase_ = {'''configuration_dpt''': ['''DPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DPTConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['''DPTFeatureExtractor''']
UpperCamelCase_ = ['''DPTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 345 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[Any] = 'vit_msn'
def __init__(self : Union[str, Any] , a__ : Optional[Any]=768 , a__ : Optional[Any]=12 , a__ : Optional[int]=12 , a__ : Optional[int]=3072 , a__ : Union[str, Any]="gelu" , a__ : str=0.0 , a__ : int=0.0 , a__ : Optional[Any]=0.0_2 , a__ : List[Any]=1E-06 , a__ : Optional[int]=224 , a__ : str=16 , a__ : Optional[Any]=3 , a__ : int=True , **a__ : List[Any] , ):
"""simple docstring"""
super().__init__(**a__ )
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = qkv_bias
| 24 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
A : Optional[Any] = {
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Any = ["MobileViTFeatureExtractor"]
A : Union[str, Any] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : int = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
A : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
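# Note (illustrative): with the _LazyModule indirection above, importing the
# package stays cheap -- accessing e.g. `MobileViTModel` is what first triggers
# the `modeling_mobilevit` (and hence torch) import.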
| 57 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : Tuple = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def a (self : int , a__ : List[Any]=0 ):
"""simple docstring"""
__snake_case = floats_tensor((1, 3, 128, 128) , rng=random.Random(a__ ) )
__snake_case = np.random.RandomState(a__ )
__snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.7_5,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def a (self : Dict ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a__ )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : List[str] ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
# warmup pass to apply optimizations
__snake_case = pipe(**self.get_dummy_inputs() )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : Any ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : Dict ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : List[str] ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
def a (self : List[str] ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ort.SessionOptions()
__snake_case = False
return options
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__snake_case = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = '''A fantasy landscape, trending on artstation'''
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=a__ , output_type='''np''' , )
__snake_case = output.images
__snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__snake_case = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def a (self : Dict ):
"""simple docstring"""
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__snake_case = init_image.resize((768, 512) )
__snake_case = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=a__ , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = '''A fantasy landscape, trending on artstation'''
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=a__ , output_type='''np''' , )
__snake_case = output.images
__snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__snake_case = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 24 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowercase__:
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]=1_3 , SCREAMING_SNAKE_CASE_ : Dict=7 , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : Optional[int]=False , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]=9_9 , SCREAMING_SNAKE_CASE_ : Optional[int]=3_2 , SCREAMING_SNAKE_CASE_ : Optional[int]=5 , SCREAMING_SNAKE_CASE_ : Optional[Any]=4 , SCREAMING_SNAKE_CASE_ : str=3_7 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE_ : int=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=5_1_2 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_6 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : str=0.02 , SCREAMING_SNAKE_CASE_ : int=3 , SCREAMING_SNAKE_CASE_ : Tuple=4 , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , ) -> List[str]:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
def _lowercase ( self : List[Any] ) -> Optional[int]:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : str ) -> List[str]:
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , )
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
lowercase_ = LlamaModel(config=a__ )
model.to(a__ )
model.eval()
lowercase_ = model(a__ , attention_mask=a__ )
lowercase_ = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , ) -> Union[str, Any]:
lowercase_ = True
lowercase_ = LlamaModel(a__ )
model.to(a__ )
model.eval()
lowercase_ = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
lowercase_ = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , )
lowercase_ = model(a__ , attention_mask=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , ) -> List[str]:
lowercase_ = LlamaForCausalLM(config=a__ )
model.to(a__ )
model.eval()
lowercase_ = model(a__ , attention_mask=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int , ) -> Tuple:
lowercase_ = True
lowercase_ = True
lowercase_ = LlamaForCausalLM(config=a__ )
model.to(a__ )
model.eval()
# first forward pass
lowercase_ = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , use_cache=a__ , )
lowercase_ = outputs.past_key_values
        # create hypothetical multiple next tokens and extend them to next_input_ids
lowercase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
lowercase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase_ = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase_ = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , output_hidden_states=a__ , )['''hidden_states'''][0]
lowercase_ = model(
a__ , attention_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , past_key_values=a__ , output_hidden_states=a__ , )['''hidden_states'''][0]
# select random slice
lowercase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a__ , a__ , atol=1e-3 ) )
def _lowercase ( self : Dict ) -> List[str]:
lowercase_ = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) = config_and_inputs
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase__( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :List[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
a :List[str] = (LlamaForCausalLM,) if is_torch_available() else ()
a :Dict = (
{
'feature-extraction': LlamaModel,
'text-classification': LlamaForSequenceClassification,
'text-generation': LlamaForCausalLM,
'zero-shot': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
a :Any = False
a :Union[str, Any] = False
def _lowercase ( self : Optional[Any] ) -> Dict:
lowercase_ = LlamaModelTester(self )
lowercase_ = ConfigTester(self , config_class=a__ , hidden_size=3_7 )
def _lowercase ( self : Optional[Any] ) -> Dict:
self.config_tester.run_common_tests()
def _lowercase ( self : List[str] ) -> Union[str, Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def _lowercase ( self : List[Any] ) -> Optional[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase_ = type
self.model_tester.create_and_check_model(*a__ )
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = input_dict['''input_ids''']
lowercase_ = input_ids.ne(1 ).to(a__ )
lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ = LlamaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
lowercase_ = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self : Union[str, Any] ) -> Dict:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = '''single_label_classification'''
lowercase_ = input_dict['''input_ids''']
lowercase_ = input_ids.ne(1 ).to(a__ )
lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ = LlamaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
lowercase_ = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self : List[Any] ) -> Optional[int]:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = '''multi_label_classification'''
lowercase_ = input_dict['''input_ids''']
lowercase_ = input_ids.ne(1 ).to(a__ )
lowercase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase_ = LlamaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
lowercase_ = model(a__ , attention_mask=a__ , labels=a__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def _lowercase ( self : Union[str, Any] ) -> Any:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = ids_tensor([1, 1_0] , config.vocab_size )
lowercase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ = LlamaModel(a__ )
original_model.to(a__ )
original_model.eval()
lowercase_ = original_model(a__ ).last_hidden_state
lowercase_ = original_model(a__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ = {'''type''': scaling_type, '''factor''': 1_0.0}
lowercase_ = LlamaModel(a__ )
scaled_model.to(a__ )
scaled_model.eval()
lowercase_ = scaled_model(a__ ).last_hidden_state
lowercase_ = scaled_model(a__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(a__ , a__ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(a__ , a__ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(a__ , a__ , atol=1e-5 ) )
@require_torch
class lowercase__( unittest.TestCase ):
"""simple docstring"""
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def _lowercase ( self : Tuple ) -> str:
lowercase_ = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowercase_ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
lowercase_ = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowercase_ = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase_ = torch.tensor([-1_2.8_2_8_1, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -1_2.8_2_8_1, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , a__ , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def _lowercase ( self : str ) -> List[Any]:
lowercase_ = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowercase_ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
lowercase_ = model(torch.tensor(a__ ) )
# Expected mean on dim = -1
lowercase_ = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase_ = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , a__ , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def _lowercase ( self : Any ) -> Optional[int]:
lowercase_ = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowercase_ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
lowercase_ = model(torch.tensor(a__ ) )
# Expected mean on dim = -1
lowercase_ = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase_ = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1e-2 , rtol=1e-2 )
    @unittest.skip(
        '''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def _lowercase ( self : Optional[int] ) -> Dict:
lowercase_ = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowercase_ = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
lowercase_ = model(torch.tensor(a__ ) )
lowercase_ = torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , a__ , atol=1e-2 , rtol=1e-2 )
# fmt: off
lowercase_ = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , a__ , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('''Model is currently gated''' )
@slow
def _lowercase ( self : List[str] ) -> int:
lowercase_ = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
lowercase_ = '''Simply put, the theory of relativity states that '''
lowercase_ = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
lowercase_ = tokenizer.encode(a__ , return_tensors='''pt''' )
lowercase_ = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=a__ )
# greedy generation outputs
lowercase_ = model.generate(a__ , max_new_tokens=6_4 , top_p=a__ , temperature=1 , do_sample=a__ )
lowercase_ = tokenizer.decode(generated_ids[0] , skip_special_tokens=a__ )
self.assertEqual(a__ , a__ )
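# Illustrative invocation (the integration tests above sit behind the `slow`
# decorator, which the transformers test suite enables via the RUN_SLOW flag;
# the test path is an assumption):
#   RUN_SLOW=1 python -m pytest tests/models/llama/test_modeling_llama.py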
| 30 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case_ = logging.getLogger(__name__)
@dataclass(frozen=_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ :
A_ : str
A_ : str
A_ : Optional[str] = None
A_ : Optional[str] = None
A_ : Optional[str] = None
@dataclass(frozen=_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ :
A_ : List[int]
A_ : Optional[List[int]] = None
A_ : Optional[List[int]] = None
A_ : Optional[Union[int, float]] = None
A_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[InputFeatures]
def __init__(self : int , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = None , a__ : List[Any]=False , a__ : bool = False , ):
"""simple docstring"""
__snake_case = hans_processors[task]()
__snake_case = os.path.join(
a__ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a__ ) , a__ , ) , )
__snake_case = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case = label_list[2], label_list[1]
__snake_case = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case = cached_features_file + '''.lock'''
with FileLock(a__ ):
if os.path.exists(a__ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__snake_case = torch.load(a__ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__snake_case = (
processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ )
)
logger.info('''Training examples: %s''' , len(a__ ) )
__snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ )
logger.info('''Saving features into cached file %s''' , a__ )
torch.save(self.features , a__ )
def __len__(self : int ):
"""simple docstring"""
return len(self.features )
def __getitem__(self : Dict , a__ : List[Any] ):
"""simple docstring"""
return self.features[i]
def a (self : List[Any] ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class SCREAMING_SNAKE_CASE__ :
A_ : List[InputFeatures]
def __init__(self : Tuple , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = 128 , a__ : Any=False , a__ : bool = False , ):
"""simple docstring"""
__snake_case = hans_processors[task]()
__snake_case = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case = label_list[2], label_list[1]
__snake_case = label_list
__snake_case = processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ )
__snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(a__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__snake_case = tf.data.Dataset.from_generator(
a__ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def a (self : Union[str, Any] ):
"""simple docstring"""
return self.dataset
def __len__(self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__(self : Any , a__ : Dict ):
"""simple docstring"""
return self.features[i]
def a (self : str ):
"""simple docstring"""
return self.label_list
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def a (self : Dict , a__ : Dict ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_train_set.txt''' ) ) , '''train''' )
def a (self : Optional[int] , a__ : Tuple ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def a (self : int ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def a (self : Any , a__ : Optional[int] , a__ : List[Any] ):
"""simple docstring"""
__snake_case = []
for i, line in enumerate(a__ ):
if i == 0:
continue
__snake_case = '''%s-%s''' % (set_type, line[0])
__snake_case = line[5]
__snake_case = line[6]
__snake_case = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
__snake_case = line[0]
examples.append(InputExample(guid=a__ , text_a=a__ , text_b=a__ , label=a__ , pairID=a__ ) )
return examples
def lowerCamelCase__ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> List[str]:
__snake_case = {label: i for i, label in enumerate(snake_case_ )}
__snake_case = []
for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
__snake_case = tokenizer(
example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding='''max_length''' , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , )
__snake_case = label_map[example.label] if example.label in label_map else 0
__snake_case = int(example.pairID )
features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
snake_case_ = {
'hans': 3,
}
snake_case_ = {
'hans': HansProcessor,
}
| 24 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : Any = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class __SCREAMING_SNAKE_CASE( _UpperCAmelCase ):
_UpperCAmelCase = 'beit'
def __init__( self: Tuple , UpperCamelCase: int=81_92 , UpperCamelCase: Any=7_68 , UpperCamelCase: List[Any]=12 , UpperCamelCase: List[Any]=12 , UpperCamelCase: Optional[Any]=30_72 , UpperCamelCase: Optional[Any]="gelu" , UpperCamelCase: List[str]=0.0 , UpperCamelCase: Union[str, Any]=0.0 , UpperCamelCase: Union[str, Any]=0.02 , UpperCamelCase: Tuple=1e-12 , UpperCamelCase: Union[str, Any]=2_24 , UpperCamelCase: str=16 , UpperCamelCase: Any=3 , UpperCamelCase: Union[str, Any]=False , UpperCamelCase: Dict=False , UpperCamelCase: Union[str, Any]=False , UpperCamelCase: List[Any]=False , UpperCamelCase: Union[str, Any]=0.1 , UpperCamelCase: Any=0.1 , UpperCamelCase: List[str]=True , UpperCamelCase: int=[3, 5, 7, 11] , UpperCamelCase: str=[1, 2, 3, 6] , UpperCamelCase: Tuple=True , UpperCamelCase: Any=0.4 , UpperCamelCase: Optional[int]=2_56 , UpperCamelCase: str=1 , UpperCamelCase: Tuple=False , UpperCamelCase: Tuple=2_55 , **UpperCamelCase: Dict , ) -> List[Any]:
super().__init__(**a__ )
snake_case__ = vocab_size
snake_case__ = hidden_size
snake_case__ = num_hidden_layers
snake_case__ = num_attention_heads
snake_case__ = intermediate_size
snake_case__ = hidden_act
snake_case__ = hidden_dropout_prob
snake_case__ = attention_probs_dropout_prob
snake_case__ = initializer_range
snake_case__ = layer_norm_eps
snake_case__ = image_size
snake_case__ = patch_size
snake_case__ = num_channels
snake_case__ = use_mask_token
snake_case__ = use_absolute_position_embeddings
snake_case__ = use_relative_position_bias
snake_case__ = use_shared_relative_position_bias
snake_case__ = layer_scale_init_value
snake_case__ = drop_path_rate
snake_case__ = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case__ = out_indices
snake_case__ = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case__ = use_auxiliary_head
snake_case__ = auxiliary_loss_weight
snake_case__ = auxiliary_channels
snake_case__ = auxiliary_num_convs
snake_case__ = auxiliary_concat_input
snake_case__ = semantic_loss_ignore_index
class __SCREAMING_SNAKE_CASE( _UpperCAmelCase ):
_UpperCAmelCase = version.parse("1.11" )
@property
def lowerCAmelCase_ ( self: Optional[Any] ) -> str:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCAmelCase_ ( self: int ) -> int:
return 1e-4
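# Illustrative export sketch (assumes BEiT is registered with the generic
# transformers ONNX exporter; the checkpoint name matches the archive map above):
#   python -m transformers.onnx --model=microsoft/beit-base-patch16-224-pt22k onnx_out/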
| 307 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[str] = ['image_processor', 'tokenizer']
A_ : Optional[Any] = 'CLIPImageProcessor'
A_ : Any = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self : int , a__ : int=None , a__ : Dict=None , **a__ : List[str] ):
"""simple docstring"""
__snake_case = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a__ , )
__snake_case = kwargs.pop('''feature_extractor''' )
__snake_case = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a__ , a__ )
def __call__(self : Any , a__ : Dict=None , a__ : List[str]=None , a__ : Dict=None , **a__ : Tuple ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__snake_case = self.tokenizer(a__ , return_tensors=a__ , **a__ )
if images is not None:
__snake_case = self.image_processor(a__ , return_tensors=a__ , **a__ )
if text is not None and images is not None:
__snake_case = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )
def a (self : Union[str, Any] , *a__ : int , **a__ : List[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*a__ , **a__ )
def a (self : Any , *a__ : List[Any] , **a__ : List[str] ):
"""simple docstring"""
return self.tokenizer.decode(*a__ , **a__ )
@property
def a (self : int ):
"""simple docstring"""
__snake_case = self.tokenizer.model_input_names
__snake_case = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
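# Illustrative usage (the pairing of a CLIPImageProcessor with an XLM-R
# tokenizer matches the AltCLIP processor; the checkpoint and `image` are
# placeholders):
# processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")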
| 24 | 0 |
from itertools import permutations
def snake_case( __magic_name__ ) -> bool:
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
lowercase : int = [7, 11, 13, 17]
for i, test in enumerate(snake_case_ ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def snake_case( __magic_name__ = 10 ) -> int:
'''simple docstring'''
return sum(
int(''''''.join(map(snake_case_ , snake_case_ ) ) )
for num in permutations(range(snake_case_ ) )
if is_substring_divisible(snake_case_ ) )
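# For reference (assuming the standard Project Euler #43 statement implemented
# above), the sum of all 0-9 pandigital numbers with this substring-divisibility
# property comes out to 16695334890.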
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 308 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Dict ):
"""simple docstring"""
__snake_case = logging.get_logger()
# the current default level is logging.WARNING
__snake_case = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = logging.get_verbosity()
__snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
__snake_case = '''Testing 1, 2, 3'''
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , msg + '''\n''' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , '''''' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , msg + '''\n''' )
# restore to the original level
logging.set_verbosity(a__ )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
    def test_env_override(self ):
        """simple docstring"""
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        env_level_str = os.getenv('''TRANSFORMERS_VERBOSITY''' , None )
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level , current_level , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
        # restore to the original level
        os.environ['''TRANSFORMERS_VERBOSITY'''] = ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
    def test_env_invalid_override(self ):
        """simple docstring"""
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger ) as cl:
            # this action activates the env var
            logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
# no need to restore as nothing was changed
    def test_advisory_warnings(self ):
        """simple docstring"""
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        msg = '''Testing 1, 2, 3'''
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , '''''' )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , msg + '''\n''' )
def test_set_progress_bar_enabled() -> str:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 24 | 0 |
'''simple docstring'''
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution( colours_picked: int = 20 ):
    total = math.comb(NUM_BALLS , colours_picked )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , colours_picked )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"""{result:.9f}"""
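# Reasoning sketch (added): by linearity of expectation, the expected number of
# distinct colours equals NUM_COLOURS * P(a fixed colour appears in the draw), and
# P(appears) = 1 - C(NUM_BALLS - BALLS_PER_COLOUR, picked) / C(NUM_BALLS, picked),
# which is exactly what the function above computes.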
if __name__ == "__main__":
print(solution(20)) | 145 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self ):
"""simple docstring"""
super().setUp()
        vocab_tokens = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
    def test_pre_tokenizer(self ):
"""simple docstring"""
        tokenizer = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
        texts = '''今天天气真好!'''
        jieba_tokens = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens , jieba_tokens )
        normalized_text = '''今天天气真好!'''
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 1_4962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_jieba_tokens )
        out_string = tokenizer.decode(input_jieba_tokens )
        self.assertEqual(out_string , normalized_text )
| 24 | 0 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def remove_ignore_keys_( state_dict ):
    '''simple docstring'''
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k , None )
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
def rename_keys( s_dict ):
    '''simple docstring'''
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(F"{key} -> {new_key}" )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
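# Illustration (added): under WHISPER_MAPPING above, an original key such as
# "decoder.blocks.0.mlp.0.weight" is rewritten first by "blocks" -> "layers" and
# then by "mlp.0" -> "fc1", yielding "decoder.layers.0.fc1.weight".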
def make_linear_from_emb( emb ):
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
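# Note (added): the bias-free Linear layer above shares its weight tensor with the
# token embedding, which is the standard way of tying input embeddings to the
# output projection: logits are computed as hidden_states @ embedding_matrix.T.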
def _download( url: str , root: str = "." ):  # NOTE: default root added here so the single-argument call below remains valid
    '''simple docstring'''
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split("/" )[-2]
    download_target = os.path.join(root , filename )
    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(F"{download_target} exists and is not a regular file" )
    if os.path.isfile(download_target ):
        model_bytes = open(download_target , "rb" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(F"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" )
    with urllib.request.urlopen(url ) as source, open(download_target , "wb" ) as output:
        with tqdm(
            total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=True , unit_divisor=1_024 ) as loop:
            while True:
                buffer = source.read(8_192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )
    model_bytes = open(download_target , "rb" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model." )
    return model_bytes
def convert_openai_whisper_to_tfms( checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path] )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_head"] , max_source_positions=dimensions["n_audio_ctx"] , )
    model = WhisperForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F" but all the following weights are missing {missing}" )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 36 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(self , parent , out_indices=None , stage_names=None , out_features=None , backbone="resnet50" , batch_size=3 , image_size=32 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ):
        """simple docstring"""
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config(self ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model(self , config , pixel_values ):
        """simple docstring"""
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
        self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest( ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self ):
        """simple docstring"""
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
    def test_config(self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self ):
        """simple docstring"""
        timm_checkpoint = '''resnet18'''
        transformers_checkpoint = '''microsoft/resnet-18'''
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3] )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def a (self : str ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a (self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
    def test_forward_signature(self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_retain_grad_hidden_states_attentions(self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config )
        model.to(torch_device )
        inputs = self._prepare_for_class(inputs_dict , model_class )
        outputs = model(**inputs )
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
    def test_backbone_outputs(self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config )
            modified_config.out_indices = None
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config )
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
| 24 | 0 |
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe( grid: Matrix , row: int , column: int , n: int ) -> bool:
"""simple docstring"""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
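    # Note (added): (row - row % 3, column - column % 3) is the top-left cell of the
    # 3x3 box containing (row, column), so the nested loop below scans exactly that box.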
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location( grid: Matrix ) -> tuple[int, int] | None:
"""simple docstring"""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku( grid: Matrix ) -> Matrix | None:
    """simple docstring"""
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution( grid: Matrix ) -> None:
    """simple docstring"""
    for row in grid:
        for cell in row:
            print(cell , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 75 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '\nimport os\n'
IMPORT_IN_FUNCTION = '\ndef foo():\n    import os\n    return False\n'
DEEPLY_NESTED_IMPORT = '\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n'
TOP_LEVEL_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n'
TRY_IMPORT_IN_FUNCTION = '\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n'
MULTIPLE_EXCEPTS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n'
EXCEPT_AS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n'
GENERIC_EXCEPT_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n'
MULTILINE_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n'
MULTILINE_BOTH_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n'
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing( tmp_path , case ):
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
| 24 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")
def fetch_github_info( auth_token: str ):
    '''simple docstring'''
    headers = {
        """Authorization""": f'token {auth_token}',
        """Accept""": """application/vnd.github.v3+json""",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
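# Note (added): the Accept header above pins the GitHub REST API v3 JSON media type,
# as recommended in GitHub's documentation, so the response shape stays stable.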
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
raise ValueError("\'USER_TOKEN\' field cannot be empty.")
| 120 |
import socket
def main() -> Any:
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_2312
    sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
| 24 | 0 |
import qiskit
def single_qubit_measure( qubits: int , classical_bits: int ):
    """simple docstring"""
    simulator = qiskit.Aer.get_backend("""aer_simulator""" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1_0_0_0 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
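# Expected behaviour (added note): with an X gate applied to both qubits, the ideal
# simulator should put essentially all shots in the '11' state, i.e. counts close
# to {'11': 1000}.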
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(F'''Total count for various states are: {counts}''')
| 147 |
from __future__ import annotations
def longest_subsequence( array: list[int] ) -> list[int]:  # This function is recursive
    array_length = len(array )
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
        # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array )
            if len(temp_array ) > len(longest_subseq ):
                longest_subseq = temp_array
        else:
            i += 1
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array )]
    if len(temp_array ) > len(longest_subseq ):
        return temp_array
    else:
        return longest_subseq
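# Example (added): the function returns the longest non-decreasing subsequence,
# e.g. longest_subsequence([3, 1, 2]) == [1, 2].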
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True )
class InputExample:
    """simple docstring"""
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True )
class InputFeatures:
    """simple docstring"""
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset( Dataset ):
        """simple docstring"""
        features: List[InputFeatures]
        def __init__(
            self ,
            data_dir: str ,
            tokenizer: PreTrainedTokenizer ,
            task: str ,
            max_seq_length: Optional[int] = None ,
            overwrite_cache=False ,
            evaluate: bool = False ,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir , 'cached_{}_{}_{}_{}'.format(
                    'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(max_seq_length ) , task , ) , )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1] , label_list[2] = label_list[2] , label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '.lock'
            with FileLock(lock_path ):
                if os.path.exists(cached_features_file ) and not overwrite_cache:
                    logger.info(f"""Loading features from cached file {cached_features_file}""" )
                    self.features = torch.load(cached_features_file )
                else:
                    logger.info(f"""Creating features from dataset file at {data_dir}""" )
                    examples = (
                        processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                    )
                    logger.info('Training examples: %s' , len(examples ) )
                    self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
                    logger.info('Saving features into cached file %s' , cached_features_file )
                    torch.save(self.features , cached_features_file )
def __len__( self :int ) -> Dict:
return len(self.features )
        def __getitem__( self , i ) -> InputFeatures:
            return self.features[i]
        def get_labels(self ):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        """simple docstring"""
        features: List[InputFeatures]
        def __init__(
            self ,
            data_dir: str ,
            tokenizer: PreTrainedTokenizer ,
            task: str ,
            max_seq_length: Optional[int] = 1_28 ,
            overwrite_cache=False ,
            evaluate: bool = False ,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1] , label_list[2] = label_list[2] , label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
            self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(a__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
            self.dataset = tf.data.Dataset.from_generator(
                gen , (
                    {
                        'example_id': tf.int32,
                        'input_ids': tf.int32,
                        'attention_mask': tf.int32,
                        'token_type_ids': tf.int32,
                    },
                    tf.int64,
                ) , (
                    {
                        'example_id': tf.TensorShape([] ),
                        'input_ids': tf.TensorShape([None, None] ),
                        'attention_mask': tf.TensorShape([None, None] ),
                        'token_type_ids': tf.TensorShape([None, None] ),
                    },
                    tf.TensorShape([] ),
                ) , )
        def get_dataset(self ):
            return self.dataset
def __len__( self :Dict ) -> List[Any]:
return len(self.features )
        def __getitem__( self , i ):
            return self.features[i]
        def get_labels(self ):
            return self.label_list
class HansProcessor( DataProcessor ):
    """simple docstring"""
    def get_train_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , 'heuristics_train_set.txt' ) ) , 'train' )
    def get_dev_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
    def get_labels( self ):
        return ["contradiction", "entailment", "neutral"]
    def _create_examples( self , lines , set_type ):
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = '%s-%s' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith('ex' ) else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features( examples , label_list , max_length , tokenizer , ):
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc='convert examples to features' ):
        if ex_index % 10000 == 0:
            logger.info('Writing example %d' % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding='max_length' , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
hans_tasks_num_labels = {
    """hans""": 3,
}
hans_processors = {
    """hans""": HansProcessor,
}
| 78 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , scope=None , range_bbox=1000 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        """simple docstring"""
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        """simple docstring"""
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        return True
    def setUp(self ):
        """simple docstring"""
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
    def test_config(self ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_token_classification(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def test_for_question_answering(self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
    def test_model_from_pretrained(self ):
        """simple docstring"""
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class LiltModelIntegrationTest( unittest.TestCase ):
    def test_inference_no_head(self ):
        """simple docstring"""
        model = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=torch_device , )
        self.assertTrue(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1E-3 ) )
| 24 | 0 |
def bubble_sort( list_data : list , length : int = 0 ):
    '''simple docstring'''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i] , list_data[i + 1] = list_data[i + 1] , list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
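# Example (added): bubble_sort([0, 5, 2, 3, 2]) == [0, 2, 2, 3, 5]; each recursive
# pass bubbles the largest remaining element to the end of the unsorted prefix.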
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args , **kwargs ):
            """simple docstring"""
            pass
def load_image( _ ):
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def a (self : List[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model=a__ , tokenizer=a__ , image_processor=a__ )
__snake_case = INVOICE_URL
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
__snake_case = '''What is the placebo?'''
__snake_case = [
{
'''image''': load_image(a__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def a (self : Union[str, Any] , a__ : Optional[int] , a__ : Dict ):
"""simple docstring"""
__snake_case = dqa_pipeline(a__ , top_k=2 )
self.assertEqual(
a__ , [
[
{'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )},
{'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def a (self : Dict ):
"""simple docstring"""
__snake_case = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__snake_case = INVOICE_URL
__snake_case = '''How many cats are there?'''
__snake_case = [
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(a__ , [] )
# We can optionnally pass directly the words and bounding boxes
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = []
__snake_case = []
__snake_case = dqa_pipeline(image=a__ , question=a__ , words=a__ , boxes=a__ , top_k=2 )
self.assertEqual(a__ , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def a (self : str ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Tuple ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Dict ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def a (self : Tuple ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def a (self : List[str] ):
"""simple docstring"""
pass
| 24 | 0 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Union[str, Any] = logging.get_logger(__name__)
def load_orig_config_file( config_path ):
'''simple docstring'''
print("Loading config file..." )
    def flatten_yaml_as_dict(d , parent_key="" , sep="." ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )
    config = argparse.Namespace()
    with open(config_path , "r" ) as yaml_file:
        try:
            config_dict = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(config_dict )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(config_path , str(exc ) ) )
    return config
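# Illustration (added): the nested-dict flattening above maps, for example,
# {"model": {"classification": {"name": "mobilevit_v2"}}} to
# {"model.classification.name": "mobilevit_v2"}, which is why the getattr calls
# further down look up dotted attribute names on the config namespace.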
def get_mobilevitv2_config( task_name , orig_cfg_file ):
    '''simple docstring'''
    config = MobileViTV2Config()
    is_segmentation_model = False
    # dataset
    if task_name.startswith("imagenet1k_" ):
        config.num_labels = 1000
        if int(task_name.strip().split("_" )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_" ):
        config.num_labels = 2_1000
        if int(task_name.strip().split("_" )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_" ):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_" ):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , "model.classification.name" , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , "model.classification.mitv2.width_multiplier" , 1.0 )
    assert (
        getattr(orig_config , "model.classification.mitv2.attn_norm_layer" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config , "model.classification.activation.name" , "swish" )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config , "model.segmentation.output_stride" , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , "model.segmentation.deeplabv3.aspp_rates" , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , "model.segmentation.deeplabv3.aspp_out_channels" , 512 )
            config.aspp_dropout_prob = getattr(orig_config , "model.segmentation.deeplabv3.aspp_dropout" , 0.1 )
    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys( state_dict , base_model=False ):
    '''simple docstring'''
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(".block." , "." )
        if ".conv." in k:
            k_new = k_new.replace(".conv." , ".convolution." )
        if ".norm." in k:
            k_new = k_new.replace(".norm." , ".normalization." )
        if "conv_1." in k:
            k_new = k_new.replace("conv_1." , f"{model_prefix}conv_stem." )
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}." , f"{model_prefix}encoder.layer.{i-1}.layer." )
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1." , ".expand_1x1." )
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1." , ".reduce_1x1." )
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0." , f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0." , f"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1." , f"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}." , f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}." , f"{model_prefix}encoder.layer.{i-1}.layernorm." )
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj." , f"{model_prefix}encoder.layer.{i-1}.conv_projection." )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0." , "layernorm_before." )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1." , "attention." )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0." , "layernorm_after." )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1." , "ffn.conv1." )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3." , "ffn.conv2." )
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1." , "classifier." )
        if "seg_head." in k:
            k_new = k_new.replace("seg_head." , "segmentation_head." )
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer." , "." )
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool." , "." )
rename_keys.append((k, k_new) )
return rename_keys
def remove_unused_keys(state_dict):
    """remove unused keys (e.g.: seg_head.aux_head)"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the loaded original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
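
# Example invocation (my addition; the script file name and all paths below
# are placeholders, not files shipped with the repo):
#
#   python convert_mobilevitv2_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path mobilevitv2-1.0.pt \
#       --orig_config_path mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0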
| 57 |
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """
    Depth-first search over the state space tree, pruning a branch as soon as
    its path sum exceeds max_sum or the remaining numbers can no longer reach it.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
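
# Invariant check (my addition, not part of the original snippet): every
# subset the backtracking search reports must sum exactly to max_sum.
assert all(sum(subset) == max_sum for subset in result)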
| 24 | 0 |
class CircularQueue:
    """Fixed-size circular (ring-buffer) queue."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
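
# Usage sketch (my addition): fill the buffer, free one slot, then enqueue
# again so the new write lands in the slot freed at the start of the array.
queue = CircularQueue(3)
queue.enqueue("a").enqueue("b").enqueue("c")
assert queue.first() == "a" and len(queue) == 3
assert queue.dequeue() == "a"
queue.enqueue("d")  # reuses the freed slot at index 0
assert queue.first() == "b" and len(queue) == 3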
| 30 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Any , a__ : List[Any] , a__ : Dict=13 , a__ : str=32 , a__ : Tuple=3 , a__ : Optional[Any]=4 , a__ : Optional[int]=[10, 20, 30, 40] , a__ : List[Any]=[2, 2, 3, 2] , a__ : List[Any]=True , a__ : int=True , a__ : List[Any]=37 , a__ : Any="gelu" , a__ : int=10 , a__ : Dict=0.0_2 , a__ : Dict=["stage2", "stage3", "stage4"] , a__ : Tuple=[2, 3, 4] , a__ : List[str]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = num_stages
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = is_training
__snake_case = use_labels
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_labels
__snake_case = initializer_range
__snake_case = out_features
__snake_case = out_indices
__snake_case = scope
def a (self : Dict ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def a (self : List[str] ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a (self : str , a__ : Union[str, Any] , a__ : List[str] , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a (self : Optional[Any] , a__ : List[Any] , a__ : str , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification(a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a (self : Tuple , a__ : List[Any] , a__ : List[str] , a__ : List[str] ):
"""simple docstring"""
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__snake_case = None
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Dict = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
A_ : Optional[Any] = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
A_ : Dict = True
A_ : Optional[Any] = False
A_ : int = False
A_ : int = False
A_ : List[str] = False
def a (self : List[str] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def a (self : Tuple ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a (self : str ):
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def a (self : Dict ):
"""simple docstring"""
def check_hidden_states_output(a__ : List[str] , a__ : str , a__ : Tuple ):
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(a__ , a__ ) )
__snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def a (self : Any ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = ConvNextModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def lowerCamelCase__ ( ) -> List[str]:
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def a (self : Tuple ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
__snake_case = model(**a__ )
# verify the logits
__snake_case = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
__snake_case = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , _UpperCAmelCase ):
A_ : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else ()
A_ : List[Any] = ConvNextConfig
A_ : Optional[Any] = False
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
| 24 | 0 |
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's Law, V = I * R: exactly one of the three arguments is passed
    as 0 and the missing quantity is returned in a dict.

    >>> ohms_law(voltage=10, current=0, resistance=5)
    {'current': 2.0}
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
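    # Example (my addition): solving V = I * R for the missing voltage.
    assert ohms_law(voltage=0, current=2, resistance=3) == {"voltage": 6.0}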
| 307 |
def solution() -> int:
    """
    Returns the product a * b * c of the unique Pythagorean triplet
    (a, b, c) for which a + b + c == 1000.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(F'{solution() = }')
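    # Check (my addition): the triplet is (200, 375, 425), since
    # 200**2 + 375**2 == 425**2 and 200 + 375 + 425 == 1000, so the
    # product is 31875000.
    assert solution() == 31875000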
| 24 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _A ( unittest.TestCase ):
def __a ( self : List[Any] ) -> str:
"""simple docstring"""
lowercase : List[str] = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
lowercase : List[str] = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 128,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 142,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(a__ ) , a__ )
def __a ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase : str = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(a__ ) , x.transpose() ) )
lowercase : Union[str, Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(a__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def __a ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowercase : List[str] = np.random.randn(3 , 4 )
lowercase : Dict = torch.tensor(a__ )
self.assertTrue(np.allclose(transpose(a__ ) , transpose(a__ ).numpy() ) )
lowercase : int = np.random.randn(3 , 4 , 5 )
lowercase : Union[str, Any] = torch.tensor(a__ )
self.assertTrue(np.allclose(transpose(a__ , axes=(1, 2, 0) ) , transpose(a__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def __a ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase : List[Any] = np.random.randn(3 , 4 )
lowercase : List[str] = tf.constant(a__ )
self.assertTrue(np.allclose(transpose(a__ ) , transpose(a__ ).numpy() ) )
lowercase : Dict = np.random.randn(3 , 4 , 5 )
lowercase : Dict = tf.constant(a__ )
self.assertTrue(np.allclose(transpose(a__ , axes=(1, 2, 0) ) , transpose(a__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
lowercase : str = np.random.randn(3 , 4 )
lowercase : List[str] = jnp.array(a__ )
self.assertTrue(np.allclose(transpose(a__ ) , np.asarray(transpose(a__ ) ) ) )
lowercase : Optional[Any] = np.random.randn(3 , 4 , 5 )
lowercase : str = jnp.array(a__ )
self.assertTrue(np.allclose(transpose(a__ , axes=(1, 2, 0) ) , np.asarray(transpose(a__ , axes=(1, 2, 0) ) ) ) )
def __a ( self : List[str] ) -> str:
"""simple docstring"""
lowercase : Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a__ , (4, 3) ) , np.reshape(a__ , (4, 3) ) ) )
lowercase : List[Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a__ , (12, 5) ) , np.reshape(a__ , (12, 5) ) ) )
@require_torch
def __a ( self : Dict ) -> Tuple:
"""simple docstring"""
lowercase : Any = np.random.randn(3 , 4 )
lowercase : Any = torch.tensor(a__ )
self.assertTrue(np.allclose(reshape(a__ , (4, 3) ) , reshape(a__ , (4, 3) ).numpy() ) )
lowercase : int = np.random.randn(3 , 4 , 5 )
lowercase : int = torch.tensor(a__ )
self.assertTrue(np.allclose(reshape(a__ , (12, 5) ) , reshape(a__ , (12, 5) ).numpy() ) )
@require_tf
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : List[Any] = np.random.randn(3 , 4 )
lowercase : int = tf.constant(a__ )
self.assertTrue(np.allclose(reshape(a__ , (4, 3) ) , reshape(a__ , (4, 3) ).numpy() ) )
lowercase : int = np.random.randn(3 , 4 , 5 )
lowercase : Dict = tf.constant(a__ )
self.assertTrue(np.allclose(reshape(a__ , (12, 5) ) , reshape(a__ , (12, 5) ).numpy() ) )
@require_flax
def __a ( self : Optional[int] ) -> int:
"""simple docstring"""
lowercase : Union[str, Any] = np.random.randn(3 , 4 )
lowercase : Tuple = jnp.array(a__ )
self.assertTrue(np.allclose(reshape(a__ , (4, 3) ) , np.asarray(reshape(a__ , (4, 3) ) ) ) )
lowercase : List[Any] = np.random.randn(3 , 4 , 5 )
lowercase : Dict = jnp.array(a__ )
self.assertTrue(np.allclose(reshape(a__ , (12, 5) ) , np.asarray(reshape(a__ , (12, 5) ) ) ) )
def __a ( self : Any ) -> Optional[int]:
"""simple docstring"""
lowercase : int = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a__ ) , np.squeeze(a__ ) ) )
lowercase : List[Any] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a__ , axis=2 ) , np.squeeze(a__ , axis=2 ) ) )
@require_torch
def __a ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase : Dict = np.random.randn(1 , 3 , 4 )
lowercase : Any = torch.tensor(a__ )
self.assertTrue(np.allclose(squeeze(a__ ) , squeeze(a__ ).numpy() ) )
lowercase : List[Any] = np.random.randn(1 , 4 , 1 , 5 )
lowercase : List[Any] = torch.tensor(a__ )
self.assertTrue(np.allclose(squeeze(a__ , axis=2 ) , squeeze(a__ , axis=2 ).numpy() ) )
@require_tf
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Tuple = np.random.randn(1 , 3 , 4 )
lowercase : Optional[Any] = tf.constant(a__ )
self.assertTrue(np.allclose(squeeze(a__ ) , squeeze(a__ ).numpy() ) )
lowercase : List[Any] = np.random.randn(1 , 4 , 1 , 5 )
lowercase : int = tf.constant(a__ )
self.assertTrue(np.allclose(squeeze(a__ , axis=2 ) , squeeze(a__ , axis=2 ).numpy() ) )
@require_flax
def __a ( self : List[str] ) -> List[str]:
"""simple docstring"""
lowercase : List[Any] = np.random.randn(1 , 3 , 4 )
lowercase : Optional[Any] = jnp.array(a__ )
self.assertTrue(np.allclose(squeeze(a__ ) , np.asarray(squeeze(a__ ) ) ) )
lowercase : List[Any] = np.random.randn(1 , 4 , 1 , 5 )
lowercase : Dict = jnp.array(a__ )
self.assertTrue(np.allclose(squeeze(a__ , axis=2 ) , np.asarray(squeeze(a__ , axis=2 ) ) ) )
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a__ , axis=1 ) , np.expand_dims(a__ , axis=1 ) ) )
@require_torch
def __a ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowercase : Any = np.random.randn(3 , 4 )
lowercase : Union[str, Any] = torch.tensor(a__ )
self.assertTrue(np.allclose(expand_dims(a__ , axis=1 ) , expand_dims(a__ , axis=1 ).numpy() ) )
@require_tf
def __a ( self : Any ) -> Any:
"""simple docstring"""
lowercase : Optional[Any] = np.random.randn(3 , 4 )
lowercase : Optional[int] = tf.constant(a__ )
self.assertTrue(np.allclose(expand_dims(a__ , axis=1 ) , expand_dims(a__ , axis=1 ).numpy() ) )
@require_flax
def __a ( self : int ) -> Any:
"""simple docstring"""
lowercase : str = np.random.randn(3 , 4 )
lowercase : Optional[Any] = jnp.array(a__ )
        self.assertTrue(np.allclose(expand_dims(a__ , axis=1 ) , np.asarray(expand_dims(a__ , axis=1 ) ) ) )
| 308 |
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 24 | 0 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def __UpperCAmelCase ( ):
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
_UpperCAmelCase : str = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching, "os.path.join", snake_case_ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def __UpperCAmelCase ( ):
assert _test_patching.open is open
_UpperCAmelCase : Union[str, Any] = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching, "open", snake_case_ ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def __UpperCAmelCase ( ):
# pandas.read_csv is not present in _test_patching
_UpperCAmelCase : str = "__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching, "pandas.read_csv", snake_case_ ):
pass
def __UpperCAmelCase ( ):
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
_UpperCAmelCase : Union[str, Any] = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching, "len", snake_case_ ) is None
with patch_submodule(_test_patching, "len", snake_case_ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def __UpperCAmelCase ( ):
_UpperCAmelCase : str = "__test_patch_submodule_start_and_stop_mock__"
_UpperCAmelCase : int = patch_submodule(_test_patching, "open", snake_case_ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def __UpperCAmelCase ( ):
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
_UpperCAmelCase : List[Any] = "__test_patch_submodule_successive_join__"
_UpperCAmelCase : List[Any] = "__test_patch_submodule_successive_dirname__"
_UpperCAmelCase : Union[str, Any] = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching, "os.path.join", snake_case_ ):
with patch_submodule(_test_patching, "os.rename", snake_case_ ):
with patch_submodule(_test_patching, "os.path.dirname", snake_case_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching, "os.rename", snake_case_ ):
with patch_submodule(_test_patching, "os.path.join", snake_case_ ):
with patch_submodule(_test_patching, "os.path.dirname", snake_case_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def __UpperCAmelCase ( ):
_UpperCAmelCase : List[str] = "__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", snake_case_ ):
pass
with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", snake_case_ ):
pass | 145 |
def get_set_bits_count(number: int) -> int:
    """
    Count the number of set bits (1-bits) in a non-negative integer.

    >>> get_set_bits_count(25)
    3
    >>> get_set_bits_count(0)
    0
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # number &= number - 1 clears the lowest set bit, so the loop runs
        # once per set bit instead of once per bit position
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
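    # Example (my addition): 0b110101 has four 1-bits, so the loop body
    # above runs exactly four times.
    assert get_set_bits_count(0b110101) == 4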
| 24 | 0 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self, __a, __a=7, __a=3, __a=18, __a=30, __a=400, __a=True, __a=None, __a=True, __a=None, __a=True, __a=[0.5, 0.5, 0.5], __a=[0.5, 0.5, 0.5], __a=False, ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = size if size is not None else {"height": 20, "width": 20}
_lowerCAmelCase : int = crop_size if crop_size is not None else {"height": 18, "width": 18}
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : List[str] = num_channels
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : int = min_resolution
_lowerCAmelCase : int = max_resolution
_lowerCAmelCase : str = do_resize
_lowerCAmelCase : str = size
_lowerCAmelCase : Tuple = do_center_crop
_lowerCAmelCase : Union[str, Any] = crop_size
_lowerCAmelCase : Union[str, Any] = do_normalize
_lowerCAmelCase : Union[str, Any] = image_mean
_lowerCAmelCase : List[Any] = image_std
_lowerCAmelCase : Union[str, Any] = do_reduce_labels
def snake_case__ ( self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def A ( ):
'''simple docstring'''
_lowerCAmelCase : int = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
_lowerCAmelCase : Optional[int] = Image.open(dataset[0]["file"] )
_lowerCAmelCase : Dict = Image.open(dataset[1]["file"] )
return image, map
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
_lowerCAmelCase : List[str] = Image.open(ds[0]["file"] )
_lowerCAmelCase : str = Image.open(ds[1]["file"] )
_lowerCAmelCase : Tuple = Image.open(ds[2]["file"] )
_lowerCAmelCase : Tuple = Image.open(ds[3]["file"] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class UpperCAmelCase_ ( _UpperCAmelCase , unittest.TestCase):
lowerCamelCase__ = BeitImageProcessor if is_vision_available() else None
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = BeitImageProcessingTester(self)
@property
def snake_case__ ( self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(a__, "do_resize"))
self.assertTrue(hasattr(a__, "size"))
self.assertTrue(hasattr(a__, "do_center_crop"))
self.assertTrue(hasattr(a__, "center_crop"))
self.assertTrue(hasattr(a__, "do_normalize"))
self.assertTrue(hasattr(a__, "image_mean"))
self.assertTrue(hasattr(a__, "image_std"))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 20, "width": 20})
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
self.assertEqual(image_processor.do_reduce_labels, a__)
_lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, crop_size=84, reduce_labels=a__)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
self.assertEqual(image_processor.do_reduce_labels, a__)
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowerCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a__)
for image in image_inputs:
self.assertIsInstance(a__, Image.Image)
# Test not batched input
_lowerCAmelCase : Union[str, Any] = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
_lowerCAmelCase : str = image_processing(a__, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_lowerCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a__, numpify=a__)
for image in image_inputs:
self.assertIsInstance(a__, np.ndarray)
# Test not batched input
_lowerCAmelCase : str = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
_lowerCAmelCase : int = image_processing(a__, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester, equal_resolution=a__, torchify=a__)
for image in image_inputs:
self.assertIsInstance(a__, torch.Tensor)
# Test not batched input
_lowerCAmelCase : Dict = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
_lowerCAmelCase : Optional[int] = image_processing(a__, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowerCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a__, torchify=a__)
_lowerCAmelCase : List[Any] = []
for image in image_inputs:
self.assertIsInstance(a__, torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
_lowerCAmelCase : str = image_processing(image_inputs[0], maps[0], return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(
encoding["labels"].shape, (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test batched
_lowerCAmelCase : Optional[Any] = image_processing(a__, a__, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(
encoding["labels"].shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test not batched input (PIL images)
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = prepare_semantic_single_inputs()
_lowerCAmelCase : List[Any] = image_processing(a__, a__, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(
encoding["labels"].shape, (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
# Test batched input (PIL images)
_lowerCAmelCase , _lowerCAmelCase : Tuple = prepare_semantic_batch_inputs()
_lowerCAmelCase : List[Any] = image_processing(a__, a__, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape, (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(
encoding["labels"].shape, (
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(encoding["labels"].dtype, torch.long)
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
_lowerCAmelCase , _lowerCAmelCase : str = prepare_semantic_single_inputs()
_lowerCAmelCase : List[str] = image_processing(a__, a__, return_tensors="pt")
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 150)
_lowerCAmelCase : str = True
_lowerCAmelCase : str = image_processing(a__, a__, return_tensors="pt")
self.assertTrue(encoding["labels"].min().item() >= 0)
self.assertTrue(encoding["labels"].max().item() <= 255)
| 36 |
from math import log2


def lowest_set_bit_index(number: int) -> int:
    """
    Return the zero-based index of the lowest set bit of ``number``
    (0 is returned for an input of 0).

    >>> lowest_set_bit_index(12)
    2
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a non-negative integer")
    # number & -number isolates the lowest set bit; log2 gives its index
    return 0 if (number == 0) else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
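    # Example (my addition): 36 == 0b100100, so 36 & -36 == 4 and the lowest
    # set bit sits at index 2.
    assert lowest_set_bit_index(36) == 2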
| 24 | 0 |
"""Matrix class with a Sherman-Morrison rank-one inverse update."""
from __future__ import annotations

from typing import Any


class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[Any]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row
        assert u.column == v.column == 1  # u, v should be column vectors

        # Calculate: self is a^(-1); the result is (a + u v^T)^(-1)
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")

        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")

        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
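
    # Hedged numerical check (my addition, not in the original snippet): with
    # ainv = I the Sherman-Morrison update returns (I + u v^T)^(-1), so
    # multiplying it back by (I + u v^T) must give the identity, up to
    # floating-point rounding.
    def test3() -> None:
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        inverse = ainv.sherman_morrison(u, v)
        product = (ainv + u * v.transpose()) * inverse
        for r in range(3):
            for c in range(3):
                assert abs(product[r, c] - (1 if r == c else 0)) < 1e-9

    test3()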
| 75 |
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
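
# Usage sketch (my addition): exercising this pipeline through the public
# `transformers.pipeline` factory. The checkpoint name is only an example,
# and the call downloads weights on first use, so it is left commented out:
#
#   from transformers import pipeline
#
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"]            # PIL.Image.Image built by postprocess() above
#   result["predicted_depth"]  # raw torch.Tensor from the model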
| 24 | 0 |
"""Project Euler Problem 2: sum of the even-valued Fibonacci terms below a limit."""


def solution(n: int = 4000000) -> int:
    """
    Returns the sum of all even Fibonacci numbers that do not exceed n.

    >>> solution(10)
    10
    """
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F'''{solution() = }''')
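    # Check (my addition): the even Fibonacci numbers below 100 are 2, 8
    # and 34, which sum to 44.
    assert solution(100) == 44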
| 120 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def lowerCamelCase__ ( ) -> Any:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '''__test_patch_submodule_mock__'''
    with patch_submodule(_test_patching , '''os.path.join''' , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def lowerCamelCase__ ( ) -> Any:
assert _test_patching.open is open
    mock = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
    with patch_submodule(_test_patching , '''open''' , mock ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> List[str]:
# pandas.read_csv is not present in _test_patching
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching , '''pandas.read_csv''' , mock ):
pass
def lowerCamelCase__ ( ) -> Union[str, Any]:
# builtin should always be mocked even if they're not in the globals
# in case they're loaded at one point
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , '''len''' , None ) is None
    with patch_submodule(_test_patching , '''len''' , mock ):
assert _test_patching.len is mock
assert _test_patching.len is len
def lowerCamelCase__ ( ) -> Union[str, Any]:
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching , '''open''' , mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def lowerCamelCase__ ( ) -> Optional[int]:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
        with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
        with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def lowerCamelCase__ ( ) -> Tuple:
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , mock ):
        pass
    with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , mock ):
        pass
| 24 | 0 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    """simple docstring"""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """simple docstring"""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
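# Sanity check (illustrative): the backtracking search above enumerates
# exactly the n! orderings that itertools.permutations produces; it just
# prints each completed `current_sequence` instead of collecting them.
from itertools import permutations

assert sum(1 for _ in permutations([3, 1, 2, 4])) == 4 * 3 * 2 * 1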
| 147 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
snake_case_ = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
A_ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the encoder.'} )
A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A_ : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
A_ : Optional[str] = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
A_ : Optional[int] = field(
default=1_024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
A_ : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Source language id for translation.'} )
A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Target language id for translation.'} )
A_ : Optional[int] = field(default=_UpperCAmelCase , metadata={'help': '# num_beams to use for evaluation.'} )
A_ : bool = field(
default=_UpperCAmelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Dict ) -> str:
logger.info(f"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(f""" {key} = {metrics[key]}""" )
save_json(snake_case_ , os.path.join(snake_case_ , f"""{split}_results.json""" ) )
def lowerCamelCase__ ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses()
check_output_dir(snake_case_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__snake_case = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
assert hasattr(snake_case_ , snake_case_ ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
__snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
__snake_case = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case_ , snake_case_ ):
__snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
__snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
__snake_case = SeqaSeqDataset
# Get datasets
__snake_case = (
dataset_class(
snake_case_ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
__snake_case = (
dataset_class(
snake_case_ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
__snake_case = (
dataset_class(
snake_case_ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
__snake_case = (
build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None
)
__snake_case = SeqaSeqTrainer(
model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator(
snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , )
__snake_case = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
__snake_case = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
__snake_case = train_result.metrics
__snake_case = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__snake_case = trainer.evaluate(metric_key_prefix='''val''' )
__snake_case = data_args.n_val
__snake_case = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
__snake_case = trainer.predict(test_dataset=snake_case_ , metric_key_prefix='''test''' )
__snake_case = test_output.metrics
__snake_case = data_args.n_test
if trainer.is_world_process_zero():
__snake_case = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.predict_with_generate:
__snake_case = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
__snake_case = lmap(str.strip , snake_case_ )
write_txt_file(snake_case_ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(snake_case_ , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def lowerCamelCase__ ( snake_case_ : Optional[Any] ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
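# Illustrative invocation (the script name, model, and paths are placeholders):
#
#   python finetune_trainer.py \
#     --model_name_or_path t5-small \
#     --data_dir ./data/xsum \
#     --output_dir ./outputs \
#     --task summarization \
#     --do_train --do_eval --predict_with_generate
#
# Alternatively, since the parser falls back to `parse_json_file` when exactly
# one `.json` argument is passed, all of the above can live in a single JSON
# config file.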
| 24 | 0 |
"""simple docstring"""
ROMAN = [
(1000, """M"""),
(900, """CM"""),
(500, """D"""),
(400, """CD"""),
(100, """C"""),
(90, """XC"""),
(50, """L"""),
(40, """XL"""),
(10, """X"""),
(9, """IX"""),
(5, """V"""),
(4, """IV"""),
(1, """I"""),
]
def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # Subtractive notation: a smaller value before a larger one (e.g. "IX")
        # contributes the difference.
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        # Greedily take as many copies of the current symbol as fit.
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
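# Quick round-trip check (illustrative, using the names restored above): the
# greedy encoder and the subtractive-rule decoder should be mutual inverses
# over the classic Roman range 1..3999.
for n in (1, 4, 9, 14, 1994, 3999):
    assert roman_to_int(int_to_roman(n)) == n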
| 78 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
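    # Sanity check (illustrative): a 360-degree arc is the full circumference.
    assert abs(arc_length(360, 10) - 2 * pi * 10) < 1e-9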
| 24 | 0 |
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('''.''')
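    # Illustrative output for a tree containing e.g. sorts/bubble_sort.py:
    #
    #   ## Sorts
    #     * [Bubble Sort](sorts/bubble_sort.py)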
| 345 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
                 hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
                 initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16,
                 num_channels=3, qkv_bias=True, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
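# Illustrative usage (assuming the class name restored above): instantiate a
# config with the defaults and tweak only what differs.
#
#   config = ViTMSNConfig(image_size=224, patch_size=16)
#   assert config.hidden_size == 768 and config.num_hidden_layers == 12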
| 24 | 0 |
"""simple docstring"""
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to"
            " fix this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
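    # Illustrative invocation (repository-relative path is a placeholder):
    #
    #   python utils/sort_auto_mappings.py               # rewrite files in place
    #   python utils/sort_auto_mappings.py --check_only  # fail if sorting is needed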
| 57 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : Tuple = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def a (self : int , a__ : List[Any]=0 ):
"""simple docstring"""
__snake_case = floats_tensor((1, 3, 128, 128) , rng=random.Random(a__ ) )
__snake_case = np.random.RandomState(a__ )
__snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.7_5,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def a (self : Dict ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a__ )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : List[str] ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
# warmup pass to apply optimizations
__snake_case = pipe(**self.get_dummy_inputs() )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : Any ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : Dict ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : List[str] ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
def a (self : List[str] ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ort.SessionOptions()
__snake_case = False
return options
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__snake_case = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = '''A fantasy landscape, trending on artstation'''
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=a__ , output_type='''np''' , )
__snake_case = output.images
__snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__snake_case = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def a (self : Dict ):
"""simple docstring"""
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__snake_case = init_image.resize((768, 512) )
__snake_case = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
__snake_case = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=a__ , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = '''A fantasy landscape, trending on artstation'''
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=a__ , output_type='''np''' , )
__snake_case = output.images
__snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__snake_case = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 24 | 0 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__a = trt.Logger(trt.Logger.WARNING)
__a = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__a = logging.getLogger(__name__)
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_8_4,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_2_8,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=2_0,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=3_0,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=4_2, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for the preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__a = parser.parse_args()
if args.tokenizer_name:
__a = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__a = args.per_device_eval_batch_size
__a = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__a = True
__a = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__a = 'temp_engine/bert-fp16.engine'
if args.inta:
__a = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__a = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__a = [network.get_input(i) for i in range(network.num_inputs)]
__a = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__a = 1 << 5_0
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__a = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__a = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def a ( snake_case__: str , snake_case__: Dict , snake_case__: Tuple , snake_case__: Any , snake_case__: Tuple , snake_case__: Optional[int] , snake_case__: Optional[int] , snake_case__: Optional[Any] ):
'''simple docstring'''
lowercase_ = np.asarray(inputs['''input_ids'''] , dtype=np.intaa )
lowercase_ = np.asarray(inputs['''attention_mask'''] , dtype=np.intaa )
lowercase_ = np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , snake_case_ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , snake_case_ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , snake_case_ )
# start time
lowercase_ = time.time()
# Run inference
context.execute_async(
bindings=[int(snake_case_ ) for d_inp in d_inputs] + [int(snake_case_ ), int(snake_case_ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(snake_case_ , snake_case_ , snake_case_ )
cuda.memcpy_dtoh_async(snake_case_ , snake_case_ , snake_case_ )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowercase_ = time.time()
lowercase_ = end_time - start_time
lowercase_ = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__a = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__a = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__a = raw_datasets['validation'].column_names
__a = 'question' if 'question' in column_names else column_names[0]
__a = 'context' if 'context' in column_names else column_names[1]
__a = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__a = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
__a = min(args.max_seq_length, tokenizer.model_max_length)
def a ( snake_case__: Dict ):
'''simple docstring'''
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
lowercase_ = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
lowercase_ = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=snake_case_ , stride=args.doc_stride , return_overflowing_tokens=snake_case_ , return_offsets_mapping=snake_case_ , padding='''max_length''' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowercase_ = tokenized_examples.pop('''overflow_to_sample_mapping''' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowercase_ = []
for i in range(len(tokenized_examples['''input_ids'''] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowercase_ = tokenized_examples.sequence_ids(snake_case_ )
lowercase_ = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowercase_ = sample_mapping[i]
tokenized_examples["example_id"].append(examples['''id'''][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowercase_ = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] )
]
return tokenized_examples
__a = raw_datasets['validation']
# Validation Feature Creation
__a = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__a = default_data_collator
__a = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__a = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def a ( snake_case__: Tuple , snake_case__: Optional[int] , snake_case__: Optional[int] , snake_case__: Optional[Any]="eval" ):
'''simple docstring'''
# Post-processing: we match the start logits and end logits to answers in the original context.
lowercase_ = postprocess_qa_predictions(
examples=snake_case_ , features=snake_case_ , predictions=snake_case_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=snake_case_ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowercase_ = [
{'''id''': k, '''prediction_text''': v, '''no_answer_probability''': 0.0} for k, v in predictions.items()
]
else:
lowercase_ = [{'''id''': k, '''prediction_text''': v} for k, v in predictions.items()]
lowercase_ = [{'''id''': ex['''id'''], '''answers''': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=snake_case_ , label_ids=snake_case_ )
__a = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def a ( snake_case__: int ):
'''simple docstring'''
return trt.volume(engine.get_binding_shape(snake_case_ ) ) * engine.get_binding_dtype(snake_case_ ).itemsize
# Allocate device memory for inputs and outputs.
__a = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__a = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__a = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__a = cuda.mem_alloc(h_outputa.nbytes)
__a = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__a = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.per_device_eval_batch_size}")
__a = 0.0
__a = 0
__a = timeit.default_timer()
__a = None
for step, batch in enumerate(eval_dataloader):
__a , __a = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__a , __a = outputs
__a = torch.tensor(start_logits)
__a = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__a = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0)
__a = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0)
__a = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__a = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0)
if all_preds is not None:
__a = nested_truncate(all_preds, len(eval_dataset))
__a = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0))
logger.info('Total Number of Inference = %d', niter)
__a = post_processing_function(eval_examples, eval_dataset, all_preds)
__a = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
| 30 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case_ = logging.getLogger(__name__)
@dataclass(frozen=_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ :
A_ : str
A_ : str
A_ : Optional[str] = None
A_ : Optional[str] = None
A_ : Optional[str] = None
@dataclass(frozen=_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ :
A_ : List[int]
A_ : Optional[List[int]] = None
A_ : Optional[List[int]] = None
A_ : Optional[Union[int, float]] = None
A_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[InputFeatures]
def __init__(self : int , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = None , a__ : List[Any]=False , a__ : bool = False , ):
"""simple docstring"""
__snake_case = hans_processors[task]()
__snake_case = os.path.join(
a__ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a__ ) , a__ , ) , )
__snake_case = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case = label_list[2], label_list[1]
__snake_case = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case = cached_features_file + '''.lock'''
with FileLock(a__ ):
if os.path.exists(a__ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__snake_case = torch.load(a__ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__snake_case = (
processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ )
)
logger.info('''Training examples: %s''' , len(a__ ) )
__snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ )
logger.info('''Saving features into cached file %s''' , a__ )
torch.save(self.features , a__ )
def __len__(self : int ):
"""simple docstring"""
return len(self.features )
def __getitem__(self : Dict , a__ : List[Any] ):
"""simple docstring"""
return self.features[i]
def a (self : List[Any] ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class SCREAMING_SNAKE_CASE__ :
A_ : List[InputFeatures]
def __init__(self : Tuple , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = 128 , a__ : Any=False , a__ : bool = False , ):
"""simple docstring"""
__snake_case = hans_processors[task]()
__snake_case = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case = label_list[2], label_list[1]
__snake_case = label_list
__snake_case = processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ )
__snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(a__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__snake_case = tf.data.Dataset.from_generator(
a__ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def a (self : Union[str, Any] ):
"""simple docstring"""
return self.dataset
def __len__(self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__(self : Any , a__ : Dict ):
"""simple docstring"""
return self.features[i]
def a (self : str ):
"""simple docstring"""
return self.label_list
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def a (self : Dict , a__ : Dict ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_train_set.txt''' ) ) , '''train''' )
def a (self : Optional[int] , a__ : Tuple ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def a (self : int ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def a (self : Any , a__ : Optional[int] , a__ : List[Any] ):
"""simple docstring"""
__snake_case = []
for i, line in enumerate(a__ ):
if i == 0:
continue
__snake_case = '''%s-%s''' % (set_type, line[0])
__snake_case = line[5]
__snake_case = line[6]
__snake_case = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
__snake_case = line[0]
examples.append(InputExample(guid=a__ , text_a=a__ , text_b=a__ , label=a__ , pairID=a__ ) )
return examples
def lowerCamelCase__ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> List[str]:
__snake_case = {label: i for i, label in enumerate(snake_case_ )}
__snake_case = []
for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
__snake_case = tokenizer(
example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding='''max_length''' , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , )
__snake_case = label_map[example.label] if example.label in label_map else 0
__snake_case = int(example.pairID )
features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
snake_case_ = {
'hans': 3,
}
snake_case_ = {
'hans': HansProcessor,
}
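# Illustrative usage (hypothetical, readable names for the obfuscated classes
# above, mirroring transformers' GLUE dataset helpers):
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = HansDataset(data_dir="./hans", tokenizer=tokenizer,
#                               task="hans", max_seq_length=128)
#   print(len(train_dataset), train_dataset.get_labels())
#
# Each item is an InputFeatures with input_ids/attention_mask/token_type_ids,
# a label index over {contradiction, entailment, neutral}, and the HANS pairID
# used for per-heuristic evaluation.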
| 24 | 0 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : int = TypeVar("""DatasetType""", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.')
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f'''Dataset at position {i} has at least one split: {list(dataset)}\n'''
                    f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset))}\']''' )
            raise ValueError(
                f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.''' )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''')
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """simple docstring"""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.')
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f'''Dataset at position {i} has at least one split: {list(dataset)}\n'''
                    f'''Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(dataset))}\']''' )
            raise ValueError(
                f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.''' )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f'''Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''' )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
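# Illustrative usage (map-style datasets d1 and d2 built elsewhere):
#
#   mixed = interleave_datasets([d1, d2], probabilities=[0.8, 0.2], seed=42,
#                               stopping_strategy="all_exhausted")
#
# With "first_exhausted" (the default) iteration stops as soon as one source
# runs out of examples; with "all_exhausted" sources are oversampled until
# every one of them has been fully seen at least once.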
| 307 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 24 | 0 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCAmelCase_ = None
try:
import msvcrt
except ImportError:
lowerCAmelCase_ = None
try:
import fcntl
except ImportError:
lowerCAmelCase_ = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCAmelCase_ = OSError
# Data
# ------------------------------------------------
lowerCAmelCase_ = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
lowerCAmelCase_ = '3.0.12'
lowerCAmelCase_ = None
def snake_case( ) -> Dict:
'''simple docstring'''
global _logger
lowercase : str = _logger or logging.getLogger(__name__ )
return _logger
class _A ( _UpperCAmelCase ):
def __init__( self : Union[str, Any] , _A : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = lock_file
return None
def __str__( self : List[str] ) -> int:
"""simple docstring"""
lowercase : Optional[Any] = f"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class _A :
def __init__( self : Union[str, Any] , _A : List[str] ) -> Tuple:
"""simple docstring"""
lowercase : List[Any] = lock
return None
def __enter__( self : Optional[Any] ) -> Any:
"""simple docstring"""
return self.lock
def __exit__( self : List[str] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self.lock.release()
return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile; another process may already rely on it.
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
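# A minimal usage sketch of the locking API above, using the public names
# declared in __all__; this assumes the module is importable as `filelock`,
# and the lock path is arbitrary.
from filelock import FileLock, Timeout

lock = FileLock("shared_resource.txt.lock", timeout=1)
try:
    with lock:  # acquire() runs on __enter__, release() on __exit__
        with open("shared_resource.txt", "a") as f:
            f.write("exclusive write\n")
except Timeout:
    # acquire() gave up after `timeout` seconds because another process holds the lock
    print("another instance currently holds the lock")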
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Dict ):
"""simple docstring"""
__snake_case = logging.get_logger()
# the current default level is logging.WARNING
__snake_case = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = logging.get_verbosity()
__snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
__snake_case = '''Testing 1, 2, 3'''
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , msg + '''\n''' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , '''''' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , msg + '''\n''' )
# restore to the original level
logging.set_verbosity(a__ )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
def a (self : Dict ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
__snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
__snake_case = os.getenv('''TRANSFORMERS_VERBOSITY''' , a__ )
__snake_case = logging.log_levels[env_level_str]
__snake_case = logging.get_verbosity()
self.assertEqual(
a__ , a__ , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
__snake_case = ''''''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
def a (self : List[Any] ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
__snake_case = logging.logging.getLogger()
with CaptureLogger(a__ ) as cl:
# this action activates the env var
logging.get_logger('''transformers.models.bart.tokenization_bart''' )
self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
# no need to restore as nothing was changed
def a (self : Any ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
__snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
__snake_case = '''Testing 1, 2, 3'''
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
# nothing should be logged as env var disables this method
with CaptureLogger(a__ ) as cl:
logger.warning_advice(a__ )
self.assertEqual(cl.out , '''''' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(a__ ) as cl:
logger.warning_advice(a__ )
self.assertEqual(cl.out , msg + '''\n''' )
def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
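# A short usage sketch of the controls exercised by the tests above:
from transformers.utils import logging as hf_logging
from transformers.utils.logging import disable_progress_bar, enable_progress_bar

hf_logging.set_verbosity_error()  # silence warnings from every transformers.* logger
logger = hf_logging.get_logger("transformers.models.bart.tokenization_bart")
logger.warning("this message is suppressed")
hf_logging.set_verbosity_warning()  # warnings show up again

disable_progress_bar()  # useful for non-interactive CI logs
enable_progress_bar()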
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
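# Example invocation of the script above (the script filename and all three
# paths are placeholders):
#
#   python convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf2_checkpoint \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/output_dir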
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : List[str] = CpmAntTokenizer
A_ : Optional[int] = False
def a (self : Optional[int] ):
"""simple docstring"""
super().setUp()
__snake_case = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
def a (self : Dict ):
"""simple docstring"""
__snake_case = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
__snake_case = '''今天天气真好!'''
__snake_case = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
__snake_case = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
__snake_case = '''今天天气真好!'''
__snake_case = [tokenizer.bos_token] + tokens
__snake_case = [6, 9802, 1_4962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
__snake_case = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
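# The lazy-module pattern above keeps `import transformers` cheap: the modeling
# file (and its torch dependency) is only imported when an exported name is
# first accessed, e.g.:
from transformers import ViTMSNConfig  # resolved through the lazy module on first access

config = ViTMSNConfig()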
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class SCREAMING_SNAKE_CASE__ :
def __init__(self : str , a__ : Dict , a__ : Tuple=None , a__ : List[Any]=None , a__ : Dict=None , a__ : Union[str, Any]="resnet50" , a__ : Dict=3 , a__ : str=32 , a__ : int=3 , a__ : Dict=True , a__ : Any=True , ):
"""simple docstring"""
__snake_case = parent
__snake_case = out_indices if out_indices is not None else [4]
__snake_case = stage_names
__snake_case = out_features
__snake_case = backbone
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = use_pretrained_backbone
__snake_case = is_training
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = self.get_config()
return config, pixel_values
def a (self : Any ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def a (self : List[Any] , a__ : int , a__ : int ):
"""simple docstring"""
__snake_case = TimmBackbone(config=a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
__snake_case = model(a__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def a (self : str ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
A_ : Optional[Any] = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
A_ : List[Any] = False
A_ : Dict = False
A_ : Any = False
A_ : List[Any] = False
def a (self : Tuple ):
"""simple docstring"""
__snake_case = TimmBackboneModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ )
def a (self : Any ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a (self : int ):
"""simple docstring"""
__snake_case = '''resnet18'''
__snake_case = '''microsoft/resnet-18'''
__snake_case = AutoBackbone.from_pretrained(a__ , use_timm_backbone=a__ )
__snake_case = AutoBackbone.from_pretrained(a__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__snake_case = AutoBackbone.from_pretrained(a__ , use_timm_backbone=a__ , out_indices=[1, 2, 3] )
__snake_case = AutoBackbone.from_pretrained(a__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def a (self : str ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a (self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
def a (self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
__snake_case = self.has_attentions
# no need to test all models as different heads yield the same functionality
__snake_case = self.all_model_classes[0]
__snake_case = model_class(a__ )
model.to(a__ )
__snake_case = self._prepare_for_class(a__ , a__ )
__snake_case = model(**a__ )
__snake_case = outputs[0][-1]
# Encoder-/Decoder-only models
__snake_case = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__snake_case = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=a__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__snake_case = copy.deepcopy(a__ )
__snake_case = None
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__snake_case = copy.deepcopy(a__ )
__snake_case = False
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
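# A minimal sketch of loading pickled pandas DataFrames through the builder
# above; the file name is a placeholder. `pd.read_pickle` is what the builder
# calls internally, so each data file must be a pickled DataFrame.
from datasets import load_dataset

dataset = load_dataset("pandas", data_files={"train": "my_dataframe.pkl"})
print(dataset["train"][0])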
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '\nimport os\n'

IMPORT_IN_FUNCTION = '\ndef foo():\n    import os\n    return False\n'

DEEPLY_NESTED_IMPORT = '\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n'

TOP_LEVEL_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n'

TRY_IMPORT_IN_FUNCTION = '\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n'

MULTIPLE_EXCEPTS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n'

EXCEPT_AS_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n'

GENERIC_EXCEPT_IMPORT = '\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n'

MULTILINE_TRY_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n'

MULTILINE_BOTH_IMPORT = '\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n'

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
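# The same helper can be called directly on any Python source file; imports
# that only appear inside try/except blocks are treated as optional and are
# not reported (the path below is a placeholder):
from transformers.dynamic_module_utils import get_imports

print(get_imports("path/to/some_module.py"))  # e.g. ["os"] for any of the fixtures above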
"""Find the longest non-decreasing subsequence of a list of integers (recursive implementation)."""
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence of ``array``."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
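# Two hand-traced examples of the recursion above: an already non-decreasing
# input comes back unchanged, while the leading 10 below is dropped because the
# suffix starting at 5 yields the longer chain.
assert longest_subsequence([1, 2, 3]) == [1, 2, 3]
assert longest_subsequence([10, 5, 7]) == [5, 7]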
import socket


def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
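# A minimal counterpart server sketch (the file name is a placeholder; the port
# matches the client above): it listens, accepts one client, and streams a file
# back until EOF, at which point the client's recv() loop ends.
import socket


def serve_file(filename: str = "File_to_send.txt", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()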
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _a ( unittest.TestCase ):
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=18, SCREAMING_SNAKE_CASE_=30, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, ) -> List[str]:
UpperCAmelCase_: List[Any] = size if size is not None else {"""shortest_edge""": 20}
UpperCAmelCase_: Union[str, Any] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
UpperCAmelCase_: Optional[int] = parent
UpperCAmelCase_: Optional[int] = batch_size
UpperCAmelCase_: List[str] = num_channels
UpperCAmelCase_: Tuple = image_size
UpperCAmelCase_: List[str] = min_resolution
UpperCAmelCase_: List[str] = max_resolution
UpperCAmelCase_: List[str] = do_resize
UpperCAmelCase_: Optional[int] = size
UpperCAmelCase_: Tuple = do_center_crop
UpperCAmelCase_: List[str] = crop_size
UpperCAmelCase_: int = do_flip_channel_order
def __snake_case (self ) -> str:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class _a ( _UpperCAmelCase , unittest.TestCase ):
A = MobileViTImageProcessor if is_vision_available() else None
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_: Any = MobileViTImageProcessingTester(self )
@property
def __snake_case (self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case (self ) -> str:
UpperCAmelCase_: Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__, """do_resize""" ) )
self.assertTrue(hasattr(a__, """size""" ) )
self.assertTrue(hasattr(a__, """do_center_crop""" ) )
self.assertTrue(hasattr(a__, """center_crop""" ) )
self.assertTrue(hasattr(a__, """do_flip_channel_order""" ) )
def __snake_case (self ) -> List[str]:
UpperCAmelCase_: List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size, {"""height""": 18, """width""": 18} )
UpperCAmelCase_: Tuple = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size, {"""height""": 84, """width""": 84} )
def __snake_case (self ) -> Dict:
pass
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_: List[str] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__, Image.Image )
# Test not batched input
UpperCAmelCase_: Any = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
), )
# Test batched
UpperCAmelCase_: Tuple = image_processing(a__, return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
), )
def __snake_case (self ) -> Any:
UpperCAmelCase_: Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_: Union[str, Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a__, numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__, np.ndarray )
# Test not batched input
UpperCAmelCase_: Union[str, Any] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
), )
# Test batched
UpperCAmelCase_: Union[str, Any] = image_processing(a__, return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
), )
def __snake_case (self ) -> Dict:
UpperCAmelCase_: Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_: List[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=a__, torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__, torch.Tensor )
# Test not batched input
UpperCAmelCase_: Tuple = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
), )
# Test batched
UpperCAmelCase_: int = image_processing(a__, return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
), )
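# A minimal sketch of using the image processor under test outside the harness
# (the image path is a placeholder; the sizes mirror the defaults used above):
from PIL import Image

from transformers import MobileViTImageProcessor

image_processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
pixel_values = image_processor(Image.open("example.jpg"), return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18]) after resize + center crop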
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Any , a__ : Union[str, Any] , a__ : int=13 , a__ : int=7 , a__ : Optional[Any]=True , a__ : Optional[int]=True , a__ : Any=True , a__ : str=True , a__ : List[Any]=99 , a__ : Any=24 , a__ : List[str]=2 , a__ : Optional[int]=6 , a__ : int=37 , a__ : List[str]="gelu" , a__ : List[Any]=0.1 , a__ : Optional[int]=0.1 , a__ : Union[str, Any]=512 , a__ : List[str]=16 , a__ : Optional[int]=2 , a__ : Union[str, Any]=0.0_2 , a__ : str=3 , a__ : Optional[Any]=None , a__ : Any=1000 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_mask
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = scope
__snake_case = range_bbox
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case = bbox[i, j, 3]
__snake_case = bbox[i, j, 1]
__snake_case = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case = bbox[i, j, 2]
__snake_case = bbox[i, j, 0]
__snake_case = t
__snake_case = None
if self.use_input_mask:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def a (self : List[str] ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def a (self : List[Any] , a__ : List[Any] , a__ : Optional[Any] , a__ : List[str] , a__ : int , a__ : Optional[int] , a__ : str , a__ : Optional[int] , ):
"""simple docstring"""
__snake_case = LiltModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ )
__snake_case = model(a__ , bbox=a__ , token_type_ids=a__ )
__snake_case = model(a__ , bbox=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def a (self : Any , a__ : Tuple , a__ : Dict , a__ : Optional[int] , a__ : Dict , a__ : Union[str, Any] , a__ : str , a__ : Tuple , ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = LiltForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(
a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a (self : int , a__ : Optional[Any] , a__ : int , a__ : int , a__ : Optional[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : str , ):
"""simple docstring"""
__snake_case = LiltForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(
a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
__snake_case = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ : Any = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Optional[int] = False
A_ : List[Any] = False
def a (self : Dict , a__ : Tuple , a__ : Tuple , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ):
"""simple docstring"""
return True
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = LiltModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , hidden_size=37 )
def a (self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def a (self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case = type
self.model_tester.create_and_check_model(*a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a__ )
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a__ )
@slow
def a (self : Optional[int] ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = LiltModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Tuple ):
"""simple docstring"""
__snake_case = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(a__ )
__snake_case = torch.tensor([[1, 2]] , device=a__ )
__snake_case = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=a__ )
# forward pass
with torch.no_grad():
__snake_case = model(input_ids=a__ , bbox=a__ )
__snake_case = torch.Size([1, 2, 768] )
__snake_case = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=a__ , )
self.assertTrue(outputs.last_hidden_state.shape , a__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , a__ , atol=1E-3 ) )
def twos_complement(number: int) -> str:
    """Return the two's complement of a non-positive integer as a binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
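# Worked example: for number = -5, bin(-5)[3:] == "101", so the width is 3 bits;
# abs(-5) - (1 << 3) == -3 and bin(-3)[3:] == "11", which is zero-padded and
# prefixed with the sign bit to give "1011" (the 4-bit two's complement of -5).
assert twos_complement(-5) == "0b1011"
assert twos_complement(0) == "0b0"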
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
snake_case_ = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
A_ : Optional[Any] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def a (self : List[Any] , a__ : Tuple , a__ : Union[str, Any] , a__ : Any ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model=a__ , tokenizer=a__ , image_processor=a__ )
__snake_case = INVOICE_URL
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
__snake_case = '''What is the placebo?'''
__snake_case = [
{
'''image''': load_image(a__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def a (self : Union[str, Any] , a__ : Optional[int] , a__ : Dict ):
"""simple docstring"""
__snake_case = dqa_pipeline(a__ , top_k=2 )
self.assertEqual(
a__ , [
[
{'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )},
{'''score''': ANY(a__ ), '''answer''': ANY(a__ ), '''start''': ANY(a__ ), '''end''': ANY(a__ )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def a (self : Dict ):
"""simple docstring"""
__snake_case = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__snake_case = INVOICE_URL
__snake_case = '''How many cats are there?'''
__snake_case = [
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(a__ , [] )
# We can optionnally pass directly the words and bounding boxes
__snake_case = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case = []
__snake_case = []
__snake_case = dqa_pipeline(image=a__ , question=a__ , words=a__ , boxes=a__ , top_k=2 )
self.assertEqual(a__ , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def a (self : str ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Tuple ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def a (self : Dict ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a__ )
__snake_case = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a__ , revision='''3dc6de3''' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(a__ ) , a__ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def a (self : Tuple ):
"""simple docstring"""
__snake_case = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__snake_case = INVOICE_URL
__snake_case = '''What is the invoice number?'''
__snake_case = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def a (self : List[str] ):
"""simple docstring"""
pass
| 24 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
A : Optional[Any] = logging.get_logger(__name__)
A : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
A : List[str] = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
A : List[str] = {
"yjernite/retribert-base-uncased": 5_1_2,
}
A : Tuple = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class _UpperCamelCase ( _UpperCAmelCase ):
'''simple docstring'''
__UpperCAmelCase : Tuple =VOCAB_FILES_NAMES
__UpperCAmelCase : Any =PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Any =PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : Optional[int] =RetriBertTokenizer
__UpperCAmelCase : Union[str, Any] =['input_ids', 'attention_mask']
def __init__( self , __a=None , __a=None , __a=True , __a="[UNK]" , __a="[SEP]" , __a="[PAD]" , __a="[CLS]" , __a="[MASK]" , __a=True , __a=None , **__a , ):
super().__init__(
a__ , tokenizer_file=a__ , do_lower_case=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , tokenize_chinese_chars=a__ , strip_accents=a__ , **a__ , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , a__ ) != do_lower_case
or normalizer_state.get("strip_accents" , a__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , a__ ) != tokenize_chinese_chars
):
__lowerCAmelCase = getattr(a__ , normalizer_state.pop("type" ) )
__lowerCAmelCase = do_lower_case
__lowerCAmelCase = strip_accents
__lowerCAmelCase = tokenize_chinese_chars
__lowerCAmelCase = normalizer_class(**a__ )
__lowerCAmelCase = do_lower_case
def snake_case ( self , __a , __a=None ):
__lowerCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = [self.sep_token_id]
__lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
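# Hedged usage sketch (the public class name and the checkpoint are taken from
# the maps above; both are assumptions of this sketch, not part of this file):
# tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
# encoded = tokenizer("How are you?", "I am fine.")  # pair input -> token_type_ids 0 / 1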
| 57 |
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune: the current path already overshoots, or even taking every
    # remaining number can no longer reach max_sum.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
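# For the inputs above the script prints the two qualifying subsets:
#   [3, 4, 2] [4, 5]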
| 24 | 0 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@property
def _lowercase ( self : Optional[Any] ) -> Any:
torch.manual_seed(0 )
lowercase_ = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def _lowercase ( self : Tuple ) -> Optional[int]:
lowercase_ = self.dummy_uncond_unet
lowercase_ = PNDMScheduler()
lowercase_ = PNDMPipeline(unet=a__ , scheduler=a__ )
pndm.to(a__ )
pndm.set_progress_bar_config(disable=a__ )
lowercase_ = torch.manual_seed(0 )
lowercase_ = pndm(generator=a__ , num_inference_steps=2_0 , output_type='''numpy''' ).images
lowercase_ = torch.manual_seed(0 )
lowercase_ = pndm(generator=a__ , num_inference_steps=2_0 , output_type='''numpy''' , return_dict=a__ )[0]
lowercase_ = image[0, -3:, -3:, -1]
lowercase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowercase_ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Any ) -> Union[str, Any]:
lowercase_ = '''google/ddpm-cifar10-32'''
lowercase_ = UNetaDModel.from_pretrained(a__ )
lowercase_ = PNDMScheduler()
lowercase_ = PNDMPipeline(unet=a__ , scheduler=a__ )
pndm.to(a__ )
pndm.set_progress_bar_config(disable=a__ )
lowercase_ = torch.manual_seed(0 )
lowercase_ = pndm(generator=a__ , output_type='''numpy''' ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowercase_ = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
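# Standalone sketch of what the slow test exercises, mirroring the test's own
# construction (class names as used in this file; checkpoint from the test):
# unet = UNetaDModel.from_pretrained("google/ddpm-cifar10-32")
# pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler()).to(torch_device)
# image = pipe(generator=torch.manual_seed(0), output_type="numpy").images[0]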
| 30 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__(self : Any , a__ : List[Any] , a__ : Dict=13 , a__ : str=32 , a__ : Tuple=3 , a__ : Optional[Any]=4 , a__ : Optional[int]=[10, 20, 30, 40] , a__ : List[Any]=[2, 2, 3, 2] , a__ : List[Any]=True , a__ : int=True , a__ : List[Any]=37 , a__ : Any="gelu" , a__ : int=10 , a__ : Dict=0.0_2 , a__ : Dict=["stage2", "stage3", "stage4"] , a__ : Tuple=[2, 3, 4] , a__ : List[str]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = num_stages
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = is_training
__snake_case = use_labels
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_labels
__snake_case = initializer_range
__snake_case = out_features
__snake_case = out_indices
__snake_case = scope
def a (self : Dict ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def a (self : List[str] ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a (self : str , a__ : Union[str, Any] , a__ : List[str] , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextModel(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a (self : Optional[Any] , a__ : List[Any] , a__ : str , a__ : List[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification(a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a (self : Tuple , a__ : List[Any] , a__ : List[str] , a__ : List[str] ):
"""simple docstring"""
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__snake_case = None
__snake_case = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
__snake_case = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a (self : Tuple ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Dict = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
A_ : Optional[Any] = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
A_ : Dict = True
A_ : Optional[Any] = False
A_ : int = False
A_ : int = False
A_ : List[str] = False
def a (self : List[str] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def a (self : Tuple ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a (self : str ):
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def a (self : Dict ):
"""simple docstring"""
def check_hidden_states_output(a__ : List[str] , a__ : str , a__ : Tuple ):
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
__snake_case = model(**self._prepare_for_class(a__ , a__ ) )
__snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def a (self : Any ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = ConvNextModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def lowerCamelCase__ ( ) -> List[str]:
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def a (self : Tuple ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
__snake_case = model(**a__ )
# verify the logits
__snake_case = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
__snake_case = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , _UpperCAmelCase ):
A_ : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else ()
A_ : List[Any] = ConvNextConfig
A_ : Optional[Any] = False
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ConvNextModelTester(self )
| 24 | 0 |
def solution(limit=28123) -> int:
    """Project Euler 23: sum of all positive integers that cannot be written
    as the sum of two abundant numbers."""
    # sum_divs[n] accumulates the sum of the proper divisors of n.
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
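# With the default limit of 28123 this prints 4179871, the accepted
# Project Euler 23 answer.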
| 307 |
def solution() -> int:
    """Project Euler 9: product a*b*c of the Pythagorean triplet with a + b + c == 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
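# Prints "solution() = 31875000"; the unique triplet is (200, 375, 425).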
| 24 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _A ( _UpperCAmelCase ):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50_265, max_position_embeddings=1_024, encoder_layers=12, encoder_ffn_dim=4_096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4_096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1_024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
@property
def __a ( self : Any ) -> Tuple:
"""simple docstring"""
return self.encoder_attention_heads
@property
def __a ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
        return self.d_model
 | 308 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : List[Any] = BartphoTokenizer
A_ : List[str] = False
A_ : Optional[Any] = True
def a (self : Tuple ):
"""simple docstring"""
super().setUp()
__snake_case = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
__snake_case = dict(zip(a__ , range(len(a__ ) ) ) )
__snake_case = {'''unk_token''': '''<unk>'''}
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f"""{token} {vocab_tokens[token]}\n""" )
__snake_case = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def a (self : str , **a__ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **a__ )
def a (self : str , a__ : Any ):
"""simple docstring"""
__snake_case = '''This is a là test'''
__snake_case = '''This is a<unk><unk> test'''
return input_text, output_text
def a (self : Dict ):
"""simple docstring"""
__snake_case = BartphoTokenizer(a__ , self.monolingual_vocab_file , **self.special_tokens_map )
__snake_case = '''This is a là test'''
__snake_case = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
__snake_case = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
__snake_case = tokens + [tokenizer.unk_token]
__snake_case = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
| 24 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict


def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
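# Example: 0b11010101 has five set bits, so get_set_bits_count(0b11010101) == 5.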
| 24 | 0 |
def binary_multiply(a: int, b: int) -> int:
    """Multiply a * b with the double-and-add (Russian peasant) scheme."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Compute (a * b) % c, reducing modulo c at every addition."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
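if __name__ == "__main__":
    # Quick illustration: 13 * 5 == 65 and (13 * 5) % 7 == 2.
    print(binary_multiply(13, 5))  # 65
    print(binary_mod_multiply(13, 5, 7))  # 2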
| 36 |
from math import log2


def get_index_of_rightmost_set_bit(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    # number & -number isolates the lowest set bit; log2 gives its index.
    return 0 if (number == 0) else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
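# Example: 36 == 0b100100, so the rightmost set bit is at index 2:
# 36 & -36 == 4 and log2(4) == 2.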
| 24 | 0 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(input_a, input_b):
    """Straight-line distance between two points."""
    return np.linalg.norm(np.array(input_a) - np.array(input_b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest neighbours."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
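# A petal length of 1.3 cm sits squarely in Iris-setosa territory, so this is
# expected to print "setosa" (the train/test split is random, but the query
# point is far from the other two classes).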
| 75 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
snake_case_ = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # Resize the depth map back to the original (width, height) of the image.
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
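# Hedged usage sketch (the checkpoint is illustrative, not mandated by this file):
# from transformers import pipeline
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# out["depth"].save("depth.png")  # PIL image; out["predicted_depth"] is the raw tensor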
| 24 | 0 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
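# `offline(...)` patches the network layer in-process, so these tests need no
# real connectivity: each mode simulates a different failure (a request that
# would hang, a refused connection, or HF_DATASETS_OFFLINE=1).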
| 120 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
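# What the suite above establishes: inside a `patch_submodule` context every
# access path a module has to the target (dotted attribute, from-import alias,
# renamed module, even a builtin) resolves to the mock, and the original
# objects come back untouched once the context exits.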
| 24 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : str = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
a : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 147 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
snake_case_ = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
A_ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
A_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether tp freeze the encoder.'} )
A_ : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
A_ : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
A_ : Optional[str] = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
A_ : Optional[int] = field(
default=1_024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
A_ : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
A_ : Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Source language id for translation.'} )
A_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Target language id for translation.'} )
A_ : Optional[int] = field(default=_UpperCAmelCase , metadata={'help': '# num_beams to use for evaluation.'} )
A_ : bool = field(
default=_UpperCAmelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : Dict ) -> str:
logger.info(f"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(f""" {key} = {metrics[key]}""" )
save_json(snake_case_ , os.path.join(snake_case_ , f"""{split}_results.json""" ) )
def lowerCamelCase__ ( ) -> Optional[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses()
check_output_dir(snake_case_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__snake_case = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
assert hasattr(snake_case_ , snake_case_ ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
__snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=snake_case_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(snake_case_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
__snake_case = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(snake_case_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(snake_case_ , snake_case_ ):
__snake_case = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
__snake_case = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(snake_case_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
__snake_case = SeqaSeqDataset
# Get datasets
__snake_case = (
dataset_class(
snake_case_ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
__snake_case = (
dataset_class(
snake_case_ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
__snake_case = (
dataset_class(
snake_case_ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
__snake_case = (
build_compute_metrics_fn(data_args.task , snake_case_ ) if training_args.predict_with_generate else None
)
__snake_case = SeqaSeqTrainer(
model=snake_case_ , args=snake_case_ , data_args=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , data_collator=SeqaSeqDataCollator(
snake_case_ , snake_case_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=snake_case_ , tokenizer=snake_case_ , )
__snake_case = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
__snake_case = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
__snake_case = train_result.metrics
__snake_case = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__snake_case = trainer.evaluate(metric_key_prefix='''val''' )
__snake_case = data_args.n_val
__snake_case = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
__snake_case = trainer.predict(test_dataset=snake_case_ , metric_key_prefix='''test''' )
__snake_case = test_output.metrics
__snake_case = data_args.n_test
if trainer.is_world_process_zero():
__snake_case = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , snake_case_ , training_args.output_dir )
all_metrics.update(snake_case_ )
if training_args.predict_with_generate:
__snake_case = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=snake_case_ , clean_up_tokenization_spaces=snake_case_ )
__snake_case = lmap(str.strip , snake_case_ )
write_txt_file(snake_case_ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(snake_case_ , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def lowerCamelCase__ ( snake_case_ : Optional[Any] ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
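# Typical invocation (script name and paths are illustrative; the flag names
# come from the dataclasses and SeqaSeqTrainingArguments above):
# python finetune_trainer.py \
#     --model_name_or_path t5-small --data_dir ./wmt_en_ro --output_dir ./out \
#     --do_train --do_eval --predict_with_generate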
| 24 | 0 |
"""simple docstring"""
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main():
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name, key_size):
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")
if __name__ == "__main__":
main()
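# Running this module writes "rsa_pubkey.txt" and "rsa_privkey.txt", each
# holding "key_size,n,exponent" (e for the public key, d for the private one).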
| 78 |
from math import pi


def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
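# 90 degrees of a circle of radius 10 is a quarter of the circumference:
# 2 * pi * 10 / 4 == 5 * pi ~= 15.707963267948966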
| 24 | 0 |
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase_ = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase_ = {
'''Salesforce/codegen-350M-mono''': 2048,
}
class _snake_case ( _UpperCAmelCase ):
'''simple docstring'''
A__ : Dict = VOCAB_FILES_NAMES
A__ : int = PRETRAINED_VOCAB_FILES_MAP
A__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Union[str, Any] = ['input_ids', 'attention_mask']
A__ : Any = CodeGenTokenizer
def __init__( self: Optional[int] ,lowerCamelCase_: Any=None ,lowerCamelCase_: List[Any]=None ,lowerCamelCase_: Optional[int]=None ,lowerCamelCase_: Tuple="<|endoftext|>" ,lowerCamelCase_: Tuple="<|endoftext|>" ,lowerCamelCase_: Any="<|endoftext|>" ,lowerCamelCase_: Any=False ,**lowerCamelCase_: Dict ,) -> List[Any]:
super().__init__(
a__ ,a__ ,tokenizer_file=a__ ,unk_token=a__ ,bos_token=a__ ,eos_token=a__ ,add_prefix_space=a__ ,**a__ ,)
if kwargs.pop("""add_bos_token""" ,a__ ):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005,"
                " so that the fast tokenizer works correctly."
            )
UpperCAmelCase_ : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,a__ ) != add_prefix_space:
UpperCAmelCase_ : Union[str, Any] = getattr(a__ ,pre_tok_state.pop("""type""" ) )
UpperCAmelCase_ : str = add_prefix_space
UpperCAmelCase_ : Optional[Any] = pre_tok_class(**a__ )
UpperCAmelCase_ : Optional[Any] = add_prefix_space
def A__ ( self: Tuple ,*lowerCamelCase_: Any ,**lowerCamelCase_: Optional[int] ) -> str:
UpperCAmelCase_ : int = kwargs.get("""is_split_into_words""" ,a__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*a__ ,**a__ )
def A__ ( self: Optional[int] ,*lowerCamelCase_: List[str] ,**lowerCamelCase_: Optional[Any] ) -> str:
UpperCAmelCase_ : List[Any] = kwargs.get("""is_split_into_words""" ,a__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*a__ ,**a__ )
def A__ ( self: List[str] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self._tokenizer.model.save(a__ ,name=a__ )
return tuple(a__ )
def A__ ( self: Dict ,lowerCamelCase_: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] ,lowerCamelCase_: bool = False ,lowerCamelCase_: bool = None ,lowerCamelCase_: Optional[List[str]] = None ,**lowerCamelCase_: List[str] ,) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = super().decode(
token_ids=a__ ,skip_special_tokens=a__ ,clean_up_tokenization_spaces=a__ ,**a__ ,)
if truncate_before_pattern is not None and len(a__ ) > 0:
UpperCAmelCase_ : Any = self.truncate(a__ ,a__ )
return decoded_text
def A__ ( self: Any ,lowerCamelCase_: Any ,lowerCamelCase_: Optional[int] ) -> List[str]:
def find_re(lowerCamelCase_: str ,lowerCamelCase_: Any ,lowerCamelCase_: Any ):
UpperCAmelCase_ : Tuple = pattern.search(a__ ,a__ )
return m.start() if m else -1
UpperCAmelCase_ : str = [re.compile(a__ ,re.MULTILINE ) for pattern in truncate_before_pattern]
UpperCAmelCase_ : Union[str, Any] = list(re.finditer("""^print""" ,a__ ,re.MULTILINE ) )
if len(a__ ) > 1:
UpperCAmelCase_ : List[Any] = completion[: prints[1].start()]
UpperCAmelCase_ : Optional[Any] = list(re.finditer("""^def""" ,a__ ,re.MULTILINE ) )
if len(a__ ) > 1:
UpperCAmelCase_ : Any = completion[: defs[1].start()]
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : List[Any] = [
pos for pos in [find_re(a__ ,a__ ,a__ ) for terminal in terminals] if pos != -1
]
if len(a__ ) > 0:
return completion[: min(a__ )]
else:
return completion
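# Hedged usage sketch of `truncate_before_pattern` (the public class name and
# the regex list are assumptions of this sketch):
# tok = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
# text = tok.decode(generated_ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])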
| 345 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
    model_type = "vit_msn"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 24 | 0 |
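# --- Illustrative example (not a dataset row) ---
# A minimal, standalone sketch of the truncation strategy used by the tokenizer's
# `truncate` method above: cut generated code at the second top-level `print` or `def`,
# then at the earliest match of any caller-supplied terminal pattern. The helper name
# `truncate_completion` is hypothetical.
import re

def truncate_completion(completion: str, truncate_before_pattern: list) -> str:
    terminals = [re.compile(p, re.MULTILINE) for p in truncate_before_pattern]
    for anchor in ("^print", "^def"):
        hits = list(re.finditer(anchor, completion, re.MULTILINE))
        if len(hits) > 1:
            # keep everything before the second top-level occurrence
            completion = completion[: hits[1].start()]
    positions = [m.start() for t in terminals if (m := t.search(completion)) is not None]
    return completion[: min(positions)] if positions else completion

# The second function definition is dropped:
print(truncate_completion("def f():\n    return 1\ndef g():\n    return 2\n", ["^#"]))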
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = "\"text\": [\"foo\", \"foo\"]"
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    '''simple docstring'''
    status_code = 2_0_0
    headers = {'Content-Length': '100'}
    cookies = {}
    def iter_content( self , **kwargs ):
        return [bytes(CONTENT , "utf-8" )]
def mock_request( *args , **kwargs ):
    '''simple docstring'''
    return MockResponse()
@pytest.mark.parametrize("urls_type" , [str, list, dict] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
import requests
monkeypatch.setattr(requests , "request" , mock_request )
__lowerCAmelCase = URL
if issubclass(snake_case_ , snake_case_ ):
__lowerCAmelCase = url
elif issubclass(snake_case_ , snake_case_ ):
__lowerCAmelCase = [url]
elif issubclass(snake_case_ , snake_case_ ):
__lowerCAmelCase = {"train": url}
__lowerCAmelCase = "dummy"
__lowerCAmelCase = "downloads"
__lowerCAmelCase = tmp_path
__lowerCAmelCase = DownloadConfig(
cache_dir=os.path.join(snake_case_ , snake_case_ ) , use_etag=snake_case_ , )
__lowerCAmelCase = DownloadManager(dataset_name=snake_case_ , download_config=snake_case_ )
__lowerCAmelCase = dl_manager.download(snake_case_ )
__lowerCAmelCase = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(snake_case_ , snake_case_ ):
__lowerCAmelCase = [downloaded_paths]
__lowerCAmelCase = [urls]
elif isinstance(snake_case_ , snake_case_ ):
assert "train" in downloaded_paths.keys()
__lowerCAmelCase = downloaded_paths.values()
__lowerCAmelCase = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(snake_case_ , snake_case_ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
__lowerCAmelCase = Path(snake_case_ )
__lowerCAmelCase = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
__lowerCAmelCase = downloaded_path.read_text()
assert content == CONTENT
__lowerCAmelCase = downloaded_path.with_suffix(".json" )
assert metadata_downloaded_path.exists()
__lowerCAmelCase = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" , [str, list, dict] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = str(snake_case_ )
if issubclass(snake_case_ , snake_case_ ):
__lowerCAmelCase = filename
elif issubclass(snake_case_ , snake_case_ ):
__lowerCAmelCase = [filename]
elif issubclass(snake_case_ , snake_case_ ):
__lowerCAmelCase = {"train": filename}
__lowerCAmelCase = "dummy"
__lowerCAmelCase = xz_file.parent
__lowerCAmelCase = "extracted"
__lowerCAmelCase = DownloadConfig(
cache_dir=snake_case_ , use_etag=snake_case_ , )
__lowerCAmelCase = DownloadManager(dataset_name=snake_case_ , download_config=snake_case_ )
__lowerCAmelCase = dl_manager.extract(snake_case_ )
__lowerCAmelCase = paths
for extracted_paths in [extracted_paths]:
if isinstance(snake_case_ , snake_case_ ):
__lowerCAmelCase = [extracted_paths]
__lowerCAmelCase = [paths]
elif isinstance(snake_case_ , snake_case_ ):
assert "train" in extracted_paths.keys()
__lowerCAmelCase = extracted_paths.values()
__lowerCAmelCase = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(snake_case_ , snake_case_ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
__lowerCAmelCase = Path(snake_case_ )
__lowerCAmelCase = extracted_path.parts
assert parts[-1] == hash_url_to_filename(snake_case_ , etag=snake_case_ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
__lowerCAmelCase = extracted_path.read_text()
__lowerCAmelCase = text_file.read_text()
assert extracted_file_content == expected_file_content
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
assert path.endswith(".jsonl" )
for num_items, line in enumerate(snake_case_ , start=1 ):
__lowerCAmelCase = json.loads(line.decode("utf-8" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl" , ["tar_jsonl_path", "zip_jsonl_path"] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = request.getfixturevalue(snake_case_ )
__lowerCAmelCase = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(snake_case_ ) , start=1 ):
_test_jsonl(snake_case_ , snake_case_ )
assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl" , ["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = request.getfixturevalue(snake_case_ )
__lowerCAmelCase = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(snake_case_ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(snake_case_ ) , start=1 ):
_test_jsonl(snake_case_ , snake_case_ )
assert num_tar == 1
assert num_jsonl == 2
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(snake_case_ ) , start=1 ):
assert os.path.basename(snake_case_ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 57 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : Tuple = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def a (self : int , a__ : List[Any]=0 ):
"""simple docstring"""
__snake_case = floats_tensor((1, 3, 128, 128) , rng=random.Random(a__ ) )
__snake_case = np.random.RandomState(a__ )
__snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.7_5,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def a (self : Dict ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a__ )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : List[str] ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
# warmup pass to apply optimizations
__snake_case = pipe(**self.get_dummy_inputs() )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : Any ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : Dict ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def a (self : List[str] ):
"""simple docstring"""
__snake_case = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
__snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs()
__snake_case = pipe(**a__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__snake_case = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
def a (self : List[str] ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ort.SessionOptions()
__snake_case = False
return options
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__snake_case = init_image.resize((768, 512) )
# using the PNDM scheduler by default
__snake_case = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = '''A fantasy landscape, trending on artstation'''
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=a__ , output_type='''np''' , )
__snake_case = output.images
__snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__snake_case = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def a (self : Dict ):
"""simple docstring"""
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
__snake_case = init_image.resize((768, 512) )
__snake_case = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
__snake_case = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=a__ , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = '''A fantasy landscape, trending on artstation'''
__snake_case = np.random.RandomState(0 )
__snake_case = pipe(
prompt=a__ , image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=a__ , output_type='''np''' , )
__snake_case = output.images
__snake_case = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
__snake_case = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 24 | 0 |
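# --- Illustrative example (not a dataset row) ---
# Sketch of the URL-to-cache-filename scheme the download-manager test above asserts
# against: the cached file name is a hash of the URL (optionally extended with a hash of
# the ETag), plus a sidecar .json holding {"url": ..., "etag": ...}. Using sha256 here is
# an assumption for illustration, not necessarily the library's exact choice.
import hashlib
import json
import os

def url_to_cache_name(url, etag=None):
    name = hashlib.sha256(url.encode("utf-8")).hexdigest()
    if etag:
        name += "." + hashlib.sha256(etag.encode("utf-8")).hexdigest()
    return name

def write_cache_metadata(cache_dir, url, etag=None):
    path = os.path.join(cache_dir, url_to_cache_name(url, etag))
    with open(path + ".json", "w") as f:
        json.dump({"url": url, "etag": etag}, f)
    return path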
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_250_movies( url: str = "" ) -> dict[str, float]:
    '''simple docstring'''
    url = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250'''
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    titles = soup.find_all('''td''' , attrs='''titleColumn''' )
    ratings = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }
def write_movies( filename: str = "IMDb_Top_250_Movies.csv" ) -> None:
    '''simple docstring'''
    movies = get_imdb_top_250_movies()
    with open(filename , '''w''' , newline='''''' ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(['''Movie title''', '''IMDb rating'''] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 30 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
snake_case_ = logging.getLogger(__name__)
@dataclass(frozen=_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ :
A_ : str
A_ : str
A_ : Optional[str] = None
A_ : Optional[str] = None
A_ : Optional[str] = None
@dataclass(frozen=_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ :
A_ : List[int]
A_ : Optional[List[int]] = None
A_ : Optional[List[int]] = None
A_ : Optional[Union[int, float]] = None
A_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[InputFeatures]
def __init__(self : int , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = None , a__ : List[Any]=False , a__ : bool = False , ):
"""simple docstring"""
__snake_case = hans_processors[task]()
__snake_case = os.path.join(
a__ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a__ ) , a__ , ) , )
__snake_case = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case = label_list[2], label_list[1]
__snake_case = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case = cached_features_file + '''.lock'''
with FileLock(a__ ):
if os.path.exists(a__ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__snake_case = torch.load(a__ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__snake_case = (
processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ )
)
logger.info('''Training examples: %s''' , len(a__ ) )
__snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ )
logger.info('''Saving features into cached file %s''' , a__ )
torch.save(self.features , a__ )
def __len__(self : int ):
"""simple docstring"""
return len(self.features )
def __getitem__(self : Dict , a__ : List[Any] ):
"""simple docstring"""
return self.features[i]
def a (self : List[Any] ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class SCREAMING_SNAKE_CASE__ :
A_ : List[InputFeatures]
def __init__(self : Tuple , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = 128 , a__ : Any=False , a__ : bool = False , ):
"""simple docstring"""
__snake_case = hans_processors[task]()
__snake_case = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case = label_list[2], label_list[1]
__snake_case = label_list
__snake_case = processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ )
__snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(a__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__snake_case = tf.data.Dataset.from_generator(
a__ , (
{
'''example_id''': tf.int64,
'''input_ids''': tf.int64,
'''attention_mask''': tf.int64,
'''token_type_ids''': tf.int64,
},
tf.int64,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def a (self : Union[str, Any] ):
"""simple docstring"""
return self.dataset
def __len__(self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__(self : Any , a__ : Dict ):
"""simple docstring"""
return self.features[i]
def a (self : str ):
"""simple docstring"""
return self.label_list
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def a (self : Dict , a__ : Dict ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_train_set.txt''' ) ) , '''train''' )
def a (self : Optional[int] , a__ : Tuple ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(a__ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def a (self : int ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def a (self : Any , a__ : Optional[int] , a__ : List[Any] ):
"""simple docstring"""
__snake_case = []
for i, line in enumerate(a__ ):
if i == 0:
continue
__snake_case = '''%s-%s''' % (set_type, line[0])
__snake_case = line[5]
__snake_case = line[6]
__snake_case = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
__snake_case = line[0]
examples.append(InputExample(guid=a__ , text_a=a__ , text_b=a__ , label=a__ , pairID=a__ ) )
return examples
def lowerCamelCase__ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> List[str]:
__snake_case = {label: i for i, label in enumerate(snake_case_ )}
__snake_case = []
for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
__snake_case = tokenizer(
example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding='''max_length''' , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , )
__snake_case = label_map[example.label] if example.label in label_map else 0
__snake_case = int(example.pairID )
features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
snake_case_ = {
'hans': 3,
}
snake_case_ = {
'hans': HansProcessor,
}
| 24 | 0 |
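# --- Illustrative example (not a dataset row) ---
# Toy version of the label reindexing done in the HANS code above: RoBERTa/BART-style
# checkpoints were trained with label indices 1 and 2 swapped, so the indices are
# exchanged before building the label -> id map.
label_list = ["contradiction", "entailment", "neutral"]
label_list[2], label_list[1] = label_list[1], label_list[2]
label_map = {label: i for i, label in enumerate(label_list)}
print(label_map)  # {'contradiction': 0, 'neutral': 1, 'entailment': 2}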
def match_pattern( input_string , pattern ) -> bool:
    """simple docstring"""
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
input_string = """aab"""
pattern = """c*a*b"""
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 307 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[str] = ['image_processor', 'tokenizer']
A_ : Optional[Any] = 'CLIPImageProcessor'
A_ : Any = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self : int , a__ : int=None , a__ : Dict=None , **a__ : List[str] ):
"""simple docstring"""
__snake_case = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a__ , )
__snake_case = kwargs.pop('''feature_extractor''' )
__snake_case = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a__ , a__ )
def __call__(self : Any , a__ : Dict=None , a__ : List[str]=None , a__ : Dict=None , **a__ : Tuple ):
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__snake_case = self.tokenizer(a__ , return_tensors=a__ , **a__ )
if images is not None:
__snake_case = self.image_processor(a__ , return_tensors=a__ , **a__ )
if text is not None and images is not None:
__snake_case = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )
def a (self : Union[str, Any] , *a__ : int , **a__ : List[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*a__ , **a__ )
def a (self : Any , *a__ : List[Any] , **a__ : List[str] ):
"""simple docstring"""
return self.tokenizer.decode(*a__ , **a__ )
@property
def a (self : int ):
"""simple docstring"""
__snake_case = self.tokenizer.model_input_names
__snake_case = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 24 | 0 |
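# --- Illustrative example (not a dataset row) ---
# A few extra checks for the `match_pattern` DP matcher defined earlier in this row.
assert match_pattern("aab", "c*a*b")      # 'c*' matches empty, 'a*' matches 'aa'
assert match_pattern("abc", "a.c")        # '.' matches any single character
assert not match_pattern("abc", "a*b")    # the trailing 'c' stays unmatched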
from __future__ import annotations
def generate_sum_of_subsets_soln( nums , max_sum ) -> list[list[int]]:
    '''simple docstring'''
    result = []
    path = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result
def create_state_space_tree( nums , max_sum , num_index , path , result , remaining_nums_sum , ) -> None:
    '''simple docstring'''
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 308 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Dict ):
"""simple docstring"""
__snake_case = logging.get_logger()
# the current default level is logging.WARNING
__snake_case = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = logging.get_verbosity()
__snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
__snake_case = '''Testing 1, 2, 3'''
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , msg + '''\n''' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , '''''' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(a__ ) as cl:
logger.warning(a__ )
self.assertEqual(cl.out , msg + '''\n''' )
# restore to the original level
logging.set_verbosity(a__ )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
def a (self : Dict ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
__snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
__snake_case = os.getenv('''TRANSFORMERS_VERBOSITY''' , a__ )
__snake_case = logging.log_levels[env_level_str]
__snake_case = logging.get_verbosity()
self.assertEqual(
a__ , a__ , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
__snake_case = ''''''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
def a (self : List[Any] ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
__snake_case = logging.logging.getLogger()
with CaptureLogger(a__ ) as cl:
# this action activates the env var
logging.get_logger('''transformers.models.bart.tokenization_bart''' )
self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
# no need to restore as nothing was changed
def a (self : Any ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
__snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
__snake_case = '''Testing 1, 2, 3'''
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
# nothing should be logged as env var disables this method
with CaptureLogger(a__ ) as cl:
logger.warning_advice(a__ )
self.assertEqual(cl.out , '''''' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(a__ ) as cl:
logger.warning_advice(a__ )
self.assertEqual(cl.out , msg + '''\n''' )
def lowerCamelCase__ ( ) -> str:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 24 | 0 |
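# --- Illustrative example (not a dataset row) ---
# Usage sketch for the subset-sum search defined earlier in this row: with
# nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 the state-space tree finds exactly the
# two subsets that sum to 9, in depth-first order.
assert generate_sum_of_subsets_soln([3, 34, 4, 12, 5, 2], 9) == [[3, 4, 2], [4, 5]]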
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file( filename: Union[str, Any], start_prompt: List[Any], end_prompt: Any ):
    with open(filename, "r", encoding="utf-8", newline="\n" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task( task_guide: Tuple ):
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task( task_guide: int, overwrite: Optional[int]=False ):
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide ), start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->", end_prompt="<!--End of the generated tip-->", )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide ), "w", encoding="utf-8", newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
                " to fix this." )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 145 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
A_ : List[str] = CpmAntTokenizer
A_ : Optional[int] = False
def a (self : Optional[int] ):
"""simple docstring"""
super().setUp()
__snake_case = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
def a (self : Dict ):
"""simple docstring"""
__snake_case = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
__snake_case = '''今天天气真好!'''
__snake_case = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
__snake_case = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
__snake_case = '''今天天气真好!'''
__snake_case = [tokenizer.bos_token] + tokens
__snake_case = [6, 9802, 1_4962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
__snake_case = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
| 24 | 0 |
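# --- Illustrative example (not a dataset row) ---
# Sketch of the tiny-vocab fixture pattern used by the tokenizer test above: write one
# token per line into a temporary vocab file, exactly as the test's setUp does.
import os
import tempfile

vocab_tokens = ["<unk>", "<pad>", "hello", "world"]
with tempfile.TemporaryDirectory() as tmpdir:
    vocab_file = os.path.join(tmpdir, "vocab.txt")
    with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
        vocab_writer.write("".join(tok + "\n" for tok in vocab_tokens))
    with open(vocab_file, encoding="utf-8") as f:
        print(f.read().splitlines())  # ['<unk>', '<pad>', 'hello', 'world']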
from manim import *
class UpperCAmelCase_ ( _UpperCAmelCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = Rectangle(height=0.5, width=0.5)
_lowerCAmelCase : Dict = Rectangle(height=0.25, width=0.25)
_lowerCAmelCase : Optional[Any] = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
_lowerCAmelCase : Optional[int] = [mem.copy() for i in range(6)]
_lowerCAmelCase : Optional[Any] = [mem.copy() for i in range(6)]
_lowerCAmelCase : str = VGroup(*a__).arrange(a__, buff=0)
_lowerCAmelCase : List[Any] = VGroup(*a__).arrange(a__, buff=0)
_lowerCAmelCase : Union[str, Any] = VGroup(a__, a__).arrange(a__, buff=0)
_lowerCAmelCase : Tuple = Text("CPU", font_size=24)
_lowerCAmelCase : Optional[Any] = Group(a__, a__).arrange(a__, buff=0.5, aligned_edge=a__)
cpu.move_to([-2.5, -0.5, 0])
self.add(a__)
_lowerCAmelCase : Optional[Any] = [mem.copy() for i in range(4)]
_lowerCAmelCase : Tuple = VGroup(*a__).arrange(a__, buff=0)
_lowerCAmelCase : int = Text("GPU", font_size=24)
_lowerCAmelCase : Optional[int] = Group(a__, a__).arrange(a__, buff=0.5, aligned_edge=a__)
gpu.move_to([-1, -1, 0])
self.add(a__)
_lowerCAmelCase : Optional[Any] = [mem.copy() for i in range(6)]
_lowerCAmelCase : str = VGroup(*a__).arrange(a__, buff=0)
_lowerCAmelCase : List[str] = Text("Model", font_size=24)
_lowerCAmelCase : Optional[Any] = Group(a__, a__).arrange(a__, buff=0.5, aligned_edge=a__)
model.move_to([3, -1.0, 0])
self.add(a__)
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Tuple = []
for i, rect in enumerate(a__):
rect.set_stroke(a__)
_lowerCAmelCase : Union[str, Any] = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(a__, opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=a__)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(model_cpu_arr[0], direction=a__, buff=0.0)
else:
cpu_target.next_to(model_cpu_arr[i - 1], direction=a__, buff=0.0)
self.add(a__)
model_cpu_arr.append(a__)
self.add(*a__, *a__, *a__)
_lowerCAmelCase : int = [mem.copy() for i in range(6)]
_lowerCAmelCase : Optional[int] = VGroup(*a__).arrange(a__, buff=0)
_lowerCAmelCase : str = Text("Loaded Checkpoint", font_size=24)
_lowerCAmelCase : Dict = Group(a__, a__).arrange(a__, buff=0.5, aligned_edge=a__)
checkpoint.move_to([3, 0.5, 0])
self.add(a__)
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Optional[Any] = []
for i, rect in enumerate(a__):
_lowerCAmelCase : Optional[Any] = fill.copy().set_fill(a__, opacity=0.7)
target.move_to(a__)
ckpt_arr.append(a__)
_lowerCAmelCase : Tuple = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.move_to(cpu_right_col_base[i - 5])
ckpt_cpu_arr.append(a__)
self.add(*a__, *a__)
_lowerCAmelCase : Union[str, Any] = Square(side_length=2.2)
key.move_to([-5, 2, 0])
_lowerCAmelCase : Any = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, )
key_text.move_to([-5, 2.4, 0])
self.add(a__, a__)
_lowerCAmelCase : List[Any] = MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18, )
blue_text.next_to(a__, DOWN * 2.4, aligned_edge=key_text.get_left())
self.add(a__)
_lowerCAmelCase : List[Any] = MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.", font_size=24, )
step_a.move_to([2, 2, 0])
_lowerCAmelCase : Dict = [meta_mem.copy() for i in range(6)]
_lowerCAmelCase : Optional[int] = [meta_mem.copy() for i in range(6)]
_lowerCAmelCase : Union[str, Any] = VGroup(*a__).arrange(a__, buff=0)
_lowerCAmelCase : Dict = VGroup(*a__).arrange(a__, buff=0)
_lowerCAmelCase : Union[str, Any] = VGroup(a__, a__).arrange(a__, buff=0)
_lowerCAmelCase : Optional[int] = Text("Disk", font_size=24)
_lowerCAmelCase : str = Group(a__, a__).arrange(a__, buff=0.5, aligned_edge=a__)
disk.move_to([-4.0, -1.25, 0])
self.play(Write(a__, run_time=3), Write(a__, run_time=1), Create(a__, run_time=1))
_lowerCAmelCase : Optional[int] = []
for i, rect in enumerate(a__):
_lowerCAmelCase : Tuple = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i]).scale(0.5)
animations.append(MoveToTarget(a__, run_time=1.5))
self.play(*a__)
self.play(FadeOut(a__))
_lowerCAmelCase : Dict = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
step_a.move_to([2, 2, 0])
self.play(Write(a__, run_time=3))
self.play(
FadeOut(a__, a__, *a__, *a__), )
self.wait()
| 36 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class SCREAMING_SNAKE_CASE__ :
def __init__(self : str , a__ : Dict , a__ : Tuple=None , a__ : List[Any]=None , a__ : Dict=None , a__ : Union[str, Any]="resnet50" , a__ : Dict=3 , a__ : str=32 , a__ : int=3 , a__ : Dict=True , a__ : Any=True , ):
"""simple docstring"""
__snake_case = parent
__snake_case = out_indices if out_indices is not None else [4]
__snake_case = stage_names
__snake_case = out_features
__snake_case = backbone
__snake_case = batch_size
__snake_case = image_size
__snake_case = num_channels
__snake_case = use_pretrained_backbone
__snake_case = is_training
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = self.get_config()
return config, pixel_values
def a (self : Any ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def a (self : List[Any] , a__ : int , a__ : int ):
"""simple docstring"""
__snake_case = TimmBackbone(config=a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
__snake_case = model(a__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def a (self : str ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
A_ : Optional[Any] = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
A_ : List[Any] = False
A_ : Dict = False
A_ : Any = False
A_ : List[Any] = False
def a (self : Tuple ):
"""simple docstring"""
__snake_case = TimmBackboneModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , has_text_modality=a__ )
def a (self : Any ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a (self : int ):
"""simple docstring"""
__snake_case = '''resnet18'''
__snake_case = '''microsoft/resnet-18'''
__snake_case = AutoBackbone.from_pretrained(a__ , use_timm_backbone=a__ )
__snake_case = AutoBackbone.from_pretrained(a__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__snake_case = AutoBackbone.from_pretrained(a__ , use_timm_backbone=a__ , out_indices=[1, 2, 3] )
__snake_case = AutoBackbone.from_pretrained(a__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def a (self : str ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def a (self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a (self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : Dict ):
"""simple docstring"""
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def a (self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
def a (self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
__snake_case = self.has_attentions
# no need to test all models as different heads yield the same functionality
__snake_case = self.all_model_classes[0]
__snake_case = model_class(a__ )
model.to(a__ )
__snake_case = self._prepare_for_class(a__ , a__ )
__snake_case = model(**a__ )
__snake_case = outputs[0][-1]
# Encoder-/Decoder-only models
__snake_case = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__snake_case = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=a__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__snake_case = copy.deepcopy(a__ )
__snake_case = None
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__snake_case = copy.deepcopy(a__ )
__snake_case = False
__snake_case = model_class(a__ )
model.to(a__ )
model.eval()
__snake_case = model(**a__ )
| 24 | 0 |
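# --- Illustrative example (not a dataset row) ---
# Toy version of the gradient-retention check in the backbone test above: an
# intermediate (non-leaf) tensor only keeps its .grad if retain_grad() is called
# before backward().
import torch

x = torch.randn(3, requires_grad=True)
h = x * 2  # non-leaf intermediate tensor
h.retain_grad()
h.sum().backward()
assert h.grad is not None and torch.equal(h.grad, torch.ones(3))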
'''simple docstring'''
import socket
def main() -> Any:
    """simple docstring"""
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_2312
    sock.connect((host, port) )
    sock.send(B'''Hello server!''' )
    with open('''Received_file''' , '''wb''' ) as out_file:
        print('''File opened''' )
        print('''Receiving data...''' )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )
    print('''Successfully received the file''' )
    sock.close()
    print('''Connection closed''' )
if __name__ == "__main__":
main()
| 75 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
snake_case_ = '\nimport os\n'
snake_case_ = '\ndef foo():\n import os\n return False\n'
snake_case_ = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n'
snake_case_ = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n'
snake_case_ = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , snake_case_ )
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : Optional[int] ) -> Dict:
__snake_case = os.path.join(snake_case_ , '''test_file.py''' )
with open(snake_case_ , '''w''' ) as _tmp_file:
_tmp_file.write(snake_case_ )
__snake_case = get_imports(snake_case_ )
assert parsed_imports == ["os"]
| 24 | 0 |
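# --- Illustrative example (not a dataset row) ---
# Minimal sketch of what an import scanner like `get_imports` has to distinguish:
# top-level imports are hard requirements, while imports guarded by try/except
# ImportError may be optional. This uses the stdlib ast module directly rather than
# the transformers helper.
import ast

source = "import os\ntry:\n    import bar\nexcept ImportError:\n    pass\n"
tree = ast.parse(source)
top_level = [node.names[0].name for node in tree.body if isinstance(node, ast.Import)]
print(top_level)  # ['os'] -- 'bar' sits inside a Try node, not at top level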
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __snake_case ( nn.Module):
"""simple docstring"""
def __init__( self : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : Dict=0.0 , lowerCamelCase : Optional[int] = None , lowerCamelCase : str = "geglu" , lowerCamelCase : Optional[int] = None , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = True , lowerCamelCase : str = "layer_norm" , lowerCamelCase : bool = False , ) -> Dict:
super().__init__()
lowerCAmelCase_ : Dict = only_cross_attention
lowerCAmelCase_ : Optional[int] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
lowerCAmelCase_ : int = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'
F' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowerCAmelCase_ : Any = AdaLayerNorm(a__ , a__ )
elif self.use_ada_layer_norm_zero:
lowerCAmelCase_ : List[str] = AdaLayerNormZero(a__ , a__ )
else:
        self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned
            # during the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    r"""A feed-forward layer as used in Transformer blocks."""

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""GELU activation function with an optional tanh approximation."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""A gated linear unit variant of GELU, see https://arxiv.org/abs/2002.05202."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""The approximate form of the Gaussian Error Linear Unit (GELU)."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    r"""Norm layer using adaptive layer norm zero (adaLN-Zero) modulation."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    r"""GroupNorm layer modified to incorporate timestep embeddings."""

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
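# A minimal smoke test for the blocks above (an illustrative sketch, not part of
# the original module): it assumes `FeedForward` and `GELU` are defined exactly as
# restored here and only checks tensor shapes.
if __name__ == "__main__":
    sample = torch.randn(2, 4, 32)  # (batch, sequence, dim)
    ff = FeedForward(dim=32, activation_fn="geglu")
    assert ff(sample).shape == sample.shape  # dim_out defaults to dim, so shape is preserved
    gelu = GELU(dim_in=32, dim_out=64)
    assert gelu(sample).shape == (2, 4, 64)  # the projection changes the feature dim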
| 120 |
import socket


def main() -> None:
    # Simple TCP client that requests a file from a server on this host.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
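# For context, a minimal companion server sketch (an assumption, not part of the
# original snippet): it streams a hypothetical local file "File_to_send" to the
# first client that connects on the same port, then closes the connection so the
# client's recv() loop terminates.
def serve_file(filename: str = "File_to_send", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        while True:
            chunk = in_file.read(1024)
            if not chunk:
                break
            conn.send(chunk)
    conn.close()
    server.close()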
| 24 | 0 |
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Return the denominator up to `digit` whose unit fraction has the longest
    recurring cycle in its decimal expansion, found by tracking remainders of
    repeated division."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)

            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
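# Example usage (a sketch): with the defaults this scans denominators up to 1000,
# the classic Project Euler #26 setup, for which the expected answer is 983.
if __name__ == "__main__":
    print(f"solution() = {solution()}")  # expected: solution() = 983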
| 147 |
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return a longest non-decreasing subsequence of `array`, found by a
    pivot-based recursive search."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
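# Example usage (a sketch): the pivot-based recursion returns one longest
# non-decreasing subsequence; for the input below that subsequence has length 6.
if __name__ == "__main__":
    print(longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]))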
| 24 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class A_ ( _UpperCAmelCase ):
"""simple docstring"""
__UpperCamelCase = 'lxmert'
__UpperCamelCase = {}
def __init__( self :int , lowercase_ :Any=3_05_22 , lowercase_ :Optional[int]=7_68 , lowercase_ :Dict=12 , lowercase_ :int=95_00 , lowercase_ :Dict=16_00 , lowercase_ :Any=4_00 , lowercase_ :List[str]=30_72 , lowercase_ :List[str]="gelu" , lowercase_ :int=0.1 , lowercase_ :Dict=0.1 , lowercase_ :str=5_12 , lowercase_ :Any=2 , lowercase_ :Any=0.02 , lowercase_ :Union[str, Any]=1E-12 , lowercase_ :str=9 , lowercase_ :Optional[Any]=5 , lowercase_ :int=5 , lowercase_ :Optional[int]=20_48 , lowercase_ :Union[str, Any]=4 , lowercase_ :Any=6.67 , lowercase_ :List[Any]=True , lowercase_ :str=True , lowercase_ :Optional[Any]=True , lowercase_ :Dict=True , lowercase_ :Dict=True , lowercase_ :int=True , lowercase_ :Union[str, Any]=True , **lowercase_ :List[Any] , ) -> Optional[int]:
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = num_qa_labels
UpperCAmelCase = num_object_labels
UpperCAmelCase = num_attr_labels
UpperCAmelCase = l_layers
UpperCAmelCase = x_layers
UpperCAmelCase = r_layers
UpperCAmelCase = visual_feat_dim
UpperCAmelCase = visual_pos_dim
UpperCAmelCase = visual_loss_normalizer
UpperCAmelCase = task_matched
UpperCAmelCase = task_mask_lm
UpperCAmelCase = task_obj_predict
UpperCAmelCase = task_qa
UpperCAmelCase = visual_obj_loss
UpperCAmelCase = visual_attr_loss
UpperCAmelCase = visual_feat_loss
UpperCAmelCase = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**a__ )
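# Example usage (a sketch; this file lives inside the `transformers` package, so
# the relative imports above cannot run standalone). Instantiating the config
# with defaults reproduces the lxmert-base hyperparameters, and the layer counts
# are exposed per sub-encoder rather than as a single integer:
#
#   config = LxmertConfig()
#   config.hidden_size         # 768
#   config.num_hidden_layers   # {"vision": 5, "cross_encoder": 5, "language": 9}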
| 78 |
import unittest

from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )
    from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST


class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
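# To run just these tests (a usage sketch, assuming the usual transformers test
# layout and an installed pytest):
#
#   pytest tests/models/lilt/test_modeling_lilt.py -k "test_model or test_config"
#
# The @slow-marked tests additionally require RUN_SLOW=1 and will download model
# weights from the Hub.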
| 24 | 0 |