import heapq
import sys

import numpy as np

TPos = tuple[int, int]


class PriorityQueue:
    """Min-priority queue with membership tracking and priority updates."""

    def __init__(self) -> None:
        self.elements = []
        self.set = set()

    def minkey(self):
        # smallest stored priority, or infinity when empty
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self) -> bool:
        return len(self.elements) == 0

    def put(self, item, priority) -> None:
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update the priority of an item already in the queue:
            # pop entries until the item is found, then push everything back
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item) -> None:
        # pop entries until `item` is found, then push the rest back
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        # peek at the item with the smallest priority
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


def consistent_heuristic(p: TPos, goal: TPos) -> float:
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos) -> float:
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos) -> float:
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict) -> float:
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans


def do_something(back_pointer, goal, start) -> None:
    # print the grid, the obstacles and the path found, then exit
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos) -> bool:
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
) -> None:
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )


def make_common_ground() -> list:
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list


heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1),
    (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1


def multi_a_star(start: TPos, goal: TPos, n_heuristic: int) -> None:
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    # top_show() returns just the (x, y) item, so a single
                    # assignment target is used here
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
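# Added note (illustrative, not in the original script): the anchor search
# (i = 0) keys on the consistent euclidean heuristic, while the inadmissible
# searches key on the manhattan / time-scaled heuristics. With the module
# defaults (W1 = 1) and g = {(0, 0): 0}:
#
#   key((0, 0), 0, (3, 4), g)  # 0 + sqrt(3**2 + 4**2) = 5.0  (euclidean)
#   key((0, 0), 1, (3, 4), g)  # 0 + |3| + |4|          = 7    (manhattan)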
from typing import Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor

trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


# Class name follows the diffusers community "DDIM noise comparative analysis"
# example this file matches.
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
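# Illustrative usage sketch (added; not part of the original file). The
# checkpoint id is an assumption -- any unconditional DDPM/DDIM image
# checkpoint with a matching resolution should work:
#
#   pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#   image, latent_timestep = pipe(image=pil_image, strength=0.5, return_dict=False)
#
# `strength` controls how far the input is pushed into the noise schedule
# before being denoised back, so smaller values stay closer to the input.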
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'),
(
'text_embeddings.position_embeddings.weight',
'vilt.embeddings.text_embeddings.position_embeddings.weight',
),
('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'),
(
'text_embeddings.token_type_embeddings.weight',
'vilt.embeddings.text_embeddings.token_type_embeddings.weight',
),
('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'),
('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'),
# patch embeddings
('transformer.cls_token', 'vilt.embeddings.cls_token'),
('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'),
('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'),
('transformer.pos_embed', 'vilt.embeddings.position_embeddings'),
# token type embeddings
('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'),
] )
# final layernorm + pooler
rename_keys.extend(
[
('transformer.norm.weight', 'vilt.layernorm.weight'),
('transformer.norm.bias', 'vilt.layernorm.bias'),
('pooler.dense.weight', 'vilt.pooler.dense.weight'),
('pooler.dense.bias', 'vilt.pooler.dense.bias'),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('vqa_classifier.0.weight', 'classifier.0.weight'),
('vqa_classifier.0.bias', 'classifier.0.bias'),
('vqa_classifier.1.weight', 'classifier.1.weight'),
('vqa_classifier.1.bias', 'classifier.1.bias'),
('vqa_classifier.3.weight', 'classifier.3.weight'),
('vqa_classifier.3.bias', 'classifier.3.bias'),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('nlvr2_classifier.0.weight', 'classifier.0.weight'),
('nlvr2_classifier.0.bias', 'classifier.0.bias'),
('nlvr2_classifier.1.weight', 'classifier.1.weight'),
('nlvr2_classifier.1.bias', 'classifier.1.bias'),
('nlvr2_classifier.3.weight', 'classifier.3.weight'),
('nlvr2_classifier.3.bias', 'classifier.3.bias'),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
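# Added example: rename_key moves a value to a new key in place, e.g.
#   d = {"old": 1}; rename_key(d, "old", "new")  # d == {"new": 1}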
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) .. C(upper_limit) via dynamic programming."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
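# Added cross-check sketch (not in the original): Catalan numbers also satisfy
# the closed form C(n) = comb(2n, n) // (n + 1), e.g. with math.comb:
#
#   from math import comb
#   [comb(2 * n, n) // (n + 1) for n in range(5)]  # [1, 1, 2, 5, 14]
#   catalan_numbers(4)                             # [1, 1, 2, 5, 14]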
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}
lowerCamelCase_ = """▁"""
class a_ ( a_ ):
'''simple docstring'''
__a: Optional[Any] = VOCAB_FILES_NAMES
__a: Tuple = PRETRAINED_VOCAB_FILES_MAP
__a: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowercase_ , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_="[CLS]" , lowercase_="[SEP]" , lowercase_="<unk>" , lowercase_="[SEP]" , lowercase_="<pad>" , lowercase_="[CLS]" , lowercase_="[MASK]" , lowercase_ = None , **lowercase_ , ) -> None:
'''simple docstring'''
lowerCAmelCase_ : Dict = (
AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ , normalized=lowercase_ )
if isinstance(lowercase_ , lowercase_ )
else mask_token
)
lowerCAmelCase_ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase_ , remove_space=lowercase_ , keep_accents=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
lowerCAmelCase_ : Any = do_lower_case
lowerCAmelCase_ : Optional[Any] = remove_space
lowerCAmelCase_ : Optional[int] = keep_accents
lowerCAmelCase_ : List[str] = vocab_file
lowerCAmelCase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
@property
def _lowercase ( self ) -> int:
'''simple docstring'''
return len(self.sp_model )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : int = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = self.__dict__.copy()
lowerCAmelCase_ : int = None
return state
def __setstate__( self , lowercase_ ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : List[str] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase_ : int = {}
lowerCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self , lowercase_ ) -> Any:
'''simple docstring'''
if self.remove_space:
lowerCAmelCase_ : Union[str, Any] = ' '.join(inputs.strip().split() )
else:
lowerCAmelCase_ : str = inputs
lowerCAmelCase_ : List[str] = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
lowerCAmelCase_ : Dict = unicodedata.normalize('NFKD' , lowercase_ )
lowerCAmelCase_ : Optional[Any] = ''.join([c for c in outputs if not unicodedata.combining(lowercase_ )] )
if self.do_lower_case:
lowerCAmelCase_ : Any = outputs.lower()
return outputs
def _lowercase ( self , lowercase_ ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Any = self.preprocess_text(lowercase_ )
lowerCAmelCase_ : str = self.sp_model.encode(lowercase_ , out_type=lowercase_ )
lowerCAmelCase_ : List[Any] = []
for piece in pieces:
if len(lowercase_ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
lowerCAmelCase_ : Union[str, Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowercase_ , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase_ : Optional[Any] = cur_pieces[1:]
else:
lowerCAmelCase_ : Optional[Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowercase_ )
else:
new_pieces.append(lowercase_ )
return new_pieces
def _lowercase ( self , lowercase_ ) -> List[str]:
'''simple docstring'''
return self.sp_model.PieceToId(lowercase_ )
def _lowercase ( self , lowercase_ ) -> Any:
'''simple docstring'''
return self.sp_model.IdToPiece(lowercase_ )
def _lowercase ( self , lowercase_ ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = []
lowerCAmelCase_ : Tuple = ''
lowerCAmelCase_ : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase_ ) + token
lowerCAmelCase_ : Dict = True
lowerCAmelCase_ : Optional[Any] = []
else:
current_sub_tokens.append(lowercase_ )
lowerCAmelCase_ : Union[str, Any] = False
out_string += self.sp_model.decode(lowercase_ )
return out_string.strip()
def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : int = [self.sep_token_id]
lowerCAmelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is not None:
return [1] + ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1]
def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = [self.sep_token_id]
lowerCAmelCase_ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self , lowercase_ , lowercase_ = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowercase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase_ : Union[str, Any] = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , 'wb' ) as fi:
lowerCAmelCase_ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # image is assumed to already carry the question(s), e.g. a dict,
            # a list of dicts, a generator or a dataset
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
if model_name == "segformer.b0.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
else:
lowerCAmelCase_ = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="segformer.b0.512x512.ade.160k",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )

    args = parser.parse_args()
    convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (adjacency dict) contains a cycle."""
    # Keep track of visited nodes
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
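# Added usage example: the back edge 3 -> 1 closes a cycle in the first graph.
#
#   check_cycle({0: [1], 1: [2], 2: [3], 3: [1]})   # True
#   check_cycle({0: [1], 1: [2], 2: [], 3: []})     # False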
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor

logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 365 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    '''Weighted directed edge of a 0-1 graph.'''

    destination_vertex: int
    weight: int


class AdjacencyList:
    '''Graph adjacency list supporting 0-1 BFS shortest paths.'''

    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        # 0-1 BFS: a deque replaces Dijkstra's priority queue because edge
        # weights are restricted to 0 and 1.
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight edges to the back.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]
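# Minimal usage sketch (illustrative values, not from the original file):
# g = AdjacencyList(3)
# g.add_edge(0, 1, 0)
# g.add_edge(1, 2, 1)
# assert g.get_shortest_path(0, 2) == 1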
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
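# MT5 has no tokenizer of its own: the T5 tokenizer classes are re-exported,
# with dummy fallbacks when sentencepiece/tokenizers are unavailable.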
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
lowerCamelCase_ = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mta"""] = [
        """MTaEncoderModel""",
        """MTaForConditionalGeneration""",
        """MTaForQuestionAnswering""",
        """MTaModel""",
        """MTaPreTrainedModel""",
        """MTaStack""",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
| 366 |
from __future__ import annotations
RADIX = 1_0


def radix_sort(list_of_ints: list[int]) -> list[int]:
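    """
    Least-significant-digit radix sort for non-negative integers.

    Added doctest (illustrative):
    >>> radix_sort([170, 45, 75, 90, 2, 24, 802, 66])
    [2, 24, 45, 66, 75, 90, 170, 802]
    """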
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowerCamelCase_ = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
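# Illustrative walk-through of the renaming below (hypothetical t5x key):
#   "encoder/layers_0/attention/key/kernel"
#     -> "encoder/block/0/layer/0/SelfAttention/k/kernel"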
def lowerCamelCase ( a_ ) -> Tuple:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
lowerCAmelCase_ = list(s_dict.keys() )
for key in keys:
lowerCAmelCase_ = R'.*/layers_(\d+)'
lowerCAmelCase_ = key
if re.match(a_ , a_ ):
lowerCAmelCase_ = re.sub(R'layers_(\d+)' , R'block/\1/layer' , a_ )
lowerCAmelCase_ = R'(encoder|decoder)\/'
if re.match(a_ , a_ ):
lowerCAmelCase_ = re.match(a_ , a_ ).groups()
if groups[0] == "encoder":
lowerCAmelCase_ = re.sub(R'/mlp/' , R'/1/mlp/' , a_ )
lowerCAmelCase_ = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , a_ )
elif groups[0] == "decoder":
lowerCAmelCase_ = re.sub(R'/mlp/' , R'/2/mlp/' , a_ )
lowerCAmelCase_ = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , a_ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
lowerCAmelCase_ = new_key.replace(a_ , a_ )
print(F'''{key} -> {new_key}''' )
lowerCAmelCase_ = s_dict.pop(a_ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase_ = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase_ = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
lowerCAmelCase_ = s_dict[key].shape[0]
lowerCAmelCase_ = s_dict[key]
for idx in range(a_ ):
lowerCAmelCase_ = expert_weihts[idx]
print(F'''{key} -> {key.replace("expert/" , "nested fstring" )}''' )
s_dict.pop(a_ )
return s_dict
lowerCamelCase_ = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
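# A gin line such as "NUM_HEADS = 12" is captured by the regex below and mapped
# to the config kwarg num_heads=12 (illustrative values).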
def lowerCamelCase ( a_ , a_ ) -> int:
    # Convert a Google-style gin config to the Hugging Face format
import regex as re
with open(a_ , 'r' ) as f:
lowerCAmelCase_ = f.read()
lowerCAmelCase_ = re.findall(R'(.*) = ([0-9.]*)' , a_ )
lowerCAmelCase_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
lowerCAmelCase_ = float(a_ ) if '.' in value else int(a_ )
lowerCAmelCase_ = re.findall(R'(.*activations) = \(\'(.*)\',\)' , a_ )[0]
lowerCAmelCase_ = str(activation[1] )
lowerCAmelCase_ = num_experts
lowerCAmelCase_ = SwitchTransformersConfig(**a_ )
return config
def lowerCamelCase ( a_ , a_ , a_=None , a_="./" , a_=8 ) -> Tuple:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
    lowerCAmelCase_ = checkpoints.load_t5x_checkpoint(a_ )
if gin_file is not None:
lowerCAmelCase_ = convert_gin_to_config(a_ , a_ )
else:
lowerCAmelCase_ = SwitchTransformersConfig.from_pretrained(a_ )
lowerCAmelCase_ = SwitchTransformersForConditionalGeneration(a_ )
lowerCAmelCase_ = flax_params['target']
lowerCAmelCase_ = flatten_dict(a_ , sep='/' )
lowerCAmelCase_ = rename_keys(a_ )
lowerCAmelCase_ = unflatten_dict(a_ , sep='/' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(a_ , a_ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
lowerCamelCase_ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 367 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def lowerCamelCase ( a_ , a_ , a_ , a_ , a_ ) -> List[Any]:
# load base model
    lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=torch.float32 )
# load LoRA weight from .safetensors
lowerCAmelCase_ = load_file(a_ )
lowerCAmelCase_ = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowerCAmelCase_ = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' )
lowerCAmelCase_ = pipeline.text_encoder
else:
lowerCAmelCase_ = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' )
lowerCAmelCase_ = pipeline.unet
# find the target layer
lowerCAmelCase_ = layer_infos.pop(0 )
while len(a_ ) > -1:
try:
lowerCAmelCase_ = curr_layer.__getattr__(a_ )
if len(a_ ) > 0:
lowerCAmelCase_ = layer_infos.pop(0 )
elif len(a_ ) == 0:
break
except Exception:
if len(a_ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowerCAmelCase_ = layer_infos.pop(0 )
lowerCAmelCase_ = []
if "lora_down" in key:
pair_keys.append(key.replace('lora_down' , 'lora_up' ) )
pair_keys.append(a_ )
else:
pair_keys.append(a_ )
pair_keys.append(key.replace('lora_up' , 'lora_down' ) )
# update weight
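        # LoRA stores each delta as a low-rank (up, down) pair; merging applies
        # W += alpha * (up @ down), squeezing conv kernels to 2D matrices first.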
if len(state_dict[pair_keys[0]].shape ) == 4:
            lowerCAmelCase_ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            lowerCAmelCase_ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
curr_layer.weight.data += alpha * torch.mm(a_ , a_ ).unsqueeze(2 ).unsqueeze(3 )
else:
            lowerCAmelCase_ = state_dict[pair_keys[0]].to(torch.float32 )
            lowerCAmelCase_ = state_dict[pair_keys[1]].to(torch.float32 )
curr_layer.weight.data += alpha * torch.mm(a_ , a_ )
# update visited list
for item in pair_keys:
visited.append(a_ )
return pipeline
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = args.base_model_path
lowerCamelCase_ = args.checkpoint_path
lowerCamelCase_ = args.dump_path
lowerCamelCase_ = args.lora_prefix_unet
lowerCamelCase_ = args.lora_prefix_text_encoder
lowerCamelCase_ = args.alpha
lowerCamelCase_ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
lowerCamelCase_ = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 14 | 0 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def lowerCamelCase ( a_ ) -> List[str]:
lowerCAmelCase_ = args.pruning_method
lowerCAmelCase_ = args.threshold
lowerCAmelCase_ = args.model_name_or_path.rstrip('/' )
lowerCAmelCase_ = args.target_model_path
print(F'''Load fine-pruned model from {model_name_or_path}''' )
lowerCAmelCase_ = torch.load(os.path.join(a_ , 'pytorch_model.bin' ) )
lowerCAmelCase_ = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowerCAmelCase_ = tensor
print(F'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
lowerCAmelCase_ = tensor
print(F'''Copied layer {name}''' )
elif "bias" in name:
lowerCAmelCase_ = tensor
print(F'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
lowerCAmelCase_ = MagnitudeBinarizer.apply(inputs=a_ , threshold=a_ )
lowerCAmelCase_ = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowerCAmelCase_ = name[:-6]
lowerCAmelCase_ = model[F'''{prefix_}mask_scores''']
lowerCAmelCase_ = TopKBinarizer.apply(a_ , a_ )
lowerCAmelCase_ = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowerCAmelCase_ = name[:-6]
lowerCAmelCase_ = model[F'''{prefix_}mask_scores''']
lowerCAmelCase_ = ThresholdBinarizer.apply(a_ , a_ , a_ )
lowerCAmelCase_ = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowerCAmelCase_ = name[:-6]
lowerCAmelCase_ = model[F'''{prefix_}mask_scores''']
lowerCAmelCase_ , lowerCAmelCase_ = -0.1, 1.1
lowerCAmelCase_ = torch.sigmoid(a_ )
lowerCAmelCase_ = s * (r - l) + l
lowerCAmelCase_ = s_bar.clamp(min=0.0 , max=1.0 )
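                # L0 "stretch and clamp": sigmoid scores are mapped from (0, 1) onto
                # (l, r) = (-0.1, 1.1), then clamped back to [0, 1] to form the mask.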
lowerCAmelCase_ = tensor * mask
print(F'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
lowerCAmelCase_ = os.path.join(
os.path.dirname(a_ ) , F'''bertarized_{os.path.basename(a_ )}''' )
if not os.path.isdir(a_ ):
shutil.copytree(a_ , a_ )
print(F'''\nCreated folder {target_model_path}''' )
torch.save(a_ , os.path.join(a_ , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
lowerCamelCase_ = parser.parse_args()
main(args)
| 368 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowerCamelCase ( a_ ) -> Any:
lowerCAmelCase_ = tmp_path / 'file.csv'
lowerCAmelCase_ = textwrap.dedent(
        'header1,header2\n1,2\n10,20\n' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> List[Any]:
lowerCAmelCase_ = tmp_path / 'malformed_file.csv'
lowerCAmelCase_ = textwrap.dedent(
        'header1,header2\n1,2\n10,20,\n' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ , a_ ) -> List[str]:
lowerCAmelCase_ = tmp_path / 'csv_with_image.csv'
lowerCAmelCase_ = textwrap.dedent(
F'''\
image
{image_file}
''' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> int:
lowerCAmelCase_ = tmp_path / 'csv_with_label.csv'
lowerCAmelCase_ = textwrap.dedent(
        'label\ngood\nbad\ngood\n' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> Union[str, Any]:
lowerCAmelCase_ = tmp_path / 'csv_with_int_list.csv'
lowerCAmelCase_ = textwrap.dedent(
        'int_list\n1 2 3\n4 5 6\n7 8 9\n' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
def lowerCamelCase ( a_ , a_ , a_ ) -> Optional[Any]:
lowerCAmelCase_ = Csv()
lowerCAmelCase_ = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(a_ , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(a_ ) in record.message
for record in caplog.records )
@require_pil
def lowerCamelCase ( a_ ) -> Optional[Any]:
with open(a_ , encoding='utf-8' ) as f:
lowerCAmelCase_ = f.read().splitlines()[1]
lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_image]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
lowerCAmelCase_ = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def lowerCamelCase ( a_ ) -> int:
with open(a_ , encoding='utf-8' ) as f:
lowerCAmelCase_ = f.read().splitlines()[1:]
lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_label]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
lowerCAmelCase_ = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad'] ).str2int(label ) for label in labels]
def lowerCamelCase ( a_ ) -> Union[str, Any]:
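    # `converters` is forwarded to pandas.read_csv, parsing the space-separated
    # column into Python lists before Arrow type inference runs.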
    lowerCAmelCase_ = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda x: [int(i) for i in x.split()]} )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_int_list]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
lowerCAmelCase_ = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 14 | 0 |
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))""")) | 369 |
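# Möbius function: +1 for square-free numbers with an even count of prime
# factors, -1 for an odd count, and 0 when the number is not square-free.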
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError(F'''Input value of [number={number}] must be an integer''' )
    if number < 1:
        raise ValueError('Input must be a positive integer' )
    factors = prime_factors(number)
    # A number with any repeated prime factor is not square-free, so mu = 0.
    if not is_square_free(factors):
        return 0
    return -1 if len(factors) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCamelCase_ = """\
"""
lowerCamelCase_ = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
lowerCamelCase_ = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = 1_6 , lowercase_ = True , lowercase_=None ) -> Optional[int]:
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
lowerCAmelCase_ = 'cuda'
else:
lowerCAmelCase_ = 'cuda' if torch.cuda.is_available() else 'cpu'
lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(lowercase_ )
lowerCAmelCase_ = model.to(lowercase_ )
lowerCAmelCase_ = AutoTokenizer.from_pretrained(lowercase_ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
lowerCAmelCase_ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(lowercase_ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
lowerCAmelCase_ = model.config.max_length - 1
else:
lowerCAmelCase_ = model.config.max_length
lowerCAmelCase_ = tokenizer(
lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors='pt' , return_attention_mask=lowercase_ , ).to(lowercase_ )
lowerCAmelCase_ = encodings['input_ids']
lowerCAmelCase_ = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
lowerCAmelCase_ = []
lowerCAmelCase_ = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 , len(lowercase_ ) , lowercase_ ) ):
lowerCAmelCase_ = min(start_index + batch_size , len(lowercase_ ) )
lowerCAmelCase_ = encoded_texts[start_index:end_index]
lowerCAmelCase_ = attn_masks[start_index:end_index]
if add_start_token:
lowerCAmelCase_ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowercase_ )
lowerCAmelCase_ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
                lowerCAmelCase_ = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(lowercase_ ), attn_mask] , dim=1 )
lowerCAmelCase_ = encoded_batch
with torch.no_grad():
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ ).logits
lowerCAmelCase_ = out_logits[..., :-1, :].contiguous()
lowerCAmelCase_ = labels[..., 1:].contiguous()
lowerCAmelCase_ = attn_mask[..., 1:].contiguous()
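            # Standard causal-LM shift: position i is predicted from tokens < i, so
            # logits drop the last step while labels and masks drop the first.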
            lowerCAmelCase_ = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , lowercase_ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(lowercase_ )}
| 370 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCamelCase ( a_ , a_ ) -> Tuple:
lowerCAmelCase_ = XCLIPTextConfig()
# derive patch size from model name
lowerCAmelCase_ = model_name.find('patch' )
lowerCAmelCase_ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
lowerCAmelCase_ = XCLIPVisionConfig(patch_size=a_ , num_frames=a_ )
if "large" in model_name:
lowerCAmelCase_ = 768
lowerCAmelCase_ = 3_072
lowerCAmelCase_ = 12
lowerCAmelCase_ = 1_024
lowerCAmelCase_ = 4_096
lowerCAmelCase_ = 16
lowerCAmelCase_ = 24
lowerCAmelCase_ = 768
lowerCAmelCase_ = 3_072
if model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ = 336
lowerCAmelCase_ = XCLIPConfig.from_text_vision_configs(a_ , a_ )
if "large" in model_name:
lowerCAmelCase_ = 768
return config
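# Example: "xclip-base-patch32-16-frames" parses to patch_size=32 above, while
# the 16-frame count is derived from the model name by the caller.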
def lowerCamelCase ( a_ ) -> List[str]:
# text encoder
if name == "token_embedding.weight":
lowerCAmelCase_ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
lowerCAmelCase_ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
lowerCAmelCase_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
lowerCAmelCase_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
lowerCAmelCase_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
lowerCAmelCase_ = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
lowerCAmelCase_ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
lowerCAmelCase_ = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
lowerCAmelCase_ = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
lowerCAmelCase_ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
lowerCAmelCase_ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
lowerCAmelCase_ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
lowerCAmelCase_ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
lowerCAmelCase_ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
lowerCAmelCase_ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
lowerCAmelCase_ = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
lowerCAmelCase_ = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
lowerCAmelCase_ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
lowerCAmelCase_ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
lowerCAmelCase_ = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
lowerCAmelCase_ = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
lowerCAmelCase_ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def lowerCamelCase ( a_ , a_ ) -> Dict:
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ = orig_state_dict.pop(a_ )
if "attn.in_proj" in key:
lowerCAmelCase_ = key.split('.' )
if key.startswith('visual' ):
lowerCAmelCase_ = key_split[3]
lowerCAmelCase_ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[
:dim
]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[
-dim:
]
else:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
elif key.startswith('mit' ):
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.vision_config.mit_hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[dim : dim * 2, :]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[dim : dim * 2]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = rename_key(a_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
lowerCAmelCase_ = val.T
lowerCAmelCase_ = val
return orig_state_dict
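# The fused "attn.in_proj" projections above are split into equal thirds
# (rows [:dim], [dim : dim * 2], [-dim:]) to recover the separate q/k/v weights
# expected by the Hugging Face implementation.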
def lowerCamelCase ( a_ ) -> List[str]:
if num_frames == 8:
lowerCAmelCase_ = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
lowerCAmelCase_ = 'eating_spaghetti.npy'
elif num_frames == 32:
lowerCAmelCase_ = 'eating_spaghetti_32_frames.npy'
lowerCAmelCase_ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=a_ , repo_type='dataset' , )
lowerCAmelCase_ = np.load(a_ )
return list(a_ )
def lowerCamelCase ( a_ , a_=None , a_=False ) -> List[Any]:
lowerCAmelCase_ = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
lowerCAmelCase_ = model_to_url[model_name]
lowerCAmelCase_ = 8
if "16-frames" in model_name:
lowerCAmelCase_ = 16
elif "shot" in model_name:
lowerCAmelCase_ = 32
lowerCAmelCase_ = get_xclip_config(a_ , a_ )
lowerCAmelCase_ = XCLIPModel(a_ )
model.eval()
if "drive" in checkpoint_url:
lowerCAmelCase_ = 'pytorch_model.bin'
gdown.cached_download(a_ , a_ , quiet=a_ )
lowerCAmelCase_ = torch.load(a_ , map_location='cpu' )['model']
else:
lowerCAmelCase_ = torch.hub.load_state_dict_from_url(a_ )['model']
lowerCAmelCase_ = convert_state_dict(a_ , a_ )
lowerCAmelCase_ = XCLIPModel(a_ )
lowerCAmelCase_ , lowerCAmelCase_ = model.load_state_dict(a_ , strict=a_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
lowerCAmelCase_ = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
lowerCAmelCase_ = VideoMAEImageProcessor(size=a_ )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase_ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase_ = XCLIPProcessor(image_processor=a_ , tokenizer=a_ )
lowerCAmelCase_ = prepare_video(a_ )
lowerCAmelCase_ = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=a_ , return_tensors='pt' , padding=a_ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
lowerCAmelCase_ = model(**a_ )
# Verify outputs
lowerCAmelCase_ = outputs.logits_per_video
lowerCAmelCase_ = logits_per_video.softmax(dim=1 )
print('Probs:' , a_ )
# kinetics-400
if model_name == "xclip-base-patch32":
lowerCAmelCase_ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
lowerCAmelCase_ = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
lowerCAmelCase_ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
lowerCAmelCase_ = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
lowerCAmelCase_ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowerCAmelCase_ = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowerCAmelCase_ = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowerCAmelCase_ = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowerCAmelCase_ = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowerCAmelCase_ = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowerCAmelCase_ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowerCAmelCase_ = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowerCAmelCase_ = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(a_ , a_ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a_ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(a_ , organization='nielsr' )
processor.push_to_hub(a_ , organization='nielsr' )
slow_tokenizer.push_to_hub(a_ , organization='nielsr' )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 14 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase_ = logging.get_logger(__name__)
def lowerCamelCase ( a_ ) -> List[List[ImageInput]]:
if isinstance(a_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(a_ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(a_ ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
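# make_batched normalizes accepted inputs to a batch of frame lists: a single
# image becomes [[image]], one video (a list of frames) becomes [video], and a
# batch of videos passes through unchanged.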
class a_ ( a_ ):
'''simple docstring'''
__a: List[Any] = ['''pixel_values''']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = True , lowercase_ = 1 / 2_5_5 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ = size if size is not None else {'shortest_edge': 2_2_4}
lowerCAmelCase_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowerCAmelCase_ = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
lowerCAmelCase_ = get_size_dict(lowercase_ , param_name='crop_size' )
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = do_center_crop
lowerCAmelCase_ = crop_size
lowerCAmelCase_ = resample
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = rescale_factor
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
if "shortest_edge" in size:
lowerCAmelCase_ = get_resize_output_image_size(lowercase_ , size['shortest_edge'] , default_to_square=lowercase_ )
elif "height" in size and "width" in size:
lowerCAmelCase_ = (size['height'], size['width'])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Optional[int]:
'''simple docstring'''
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , ) -> np.ndarray:
'''simple docstring'''
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase_ = to_numpy_array(lowercase_ )
if do_resize:
lowerCAmelCase_ = self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ )
if do_center_crop:
lowerCAmelCase_ = self.center_crop(lowercase_ , size=lowercase_ )
if do_rescale:
lowerCAmelCase_ = self.rescale(image=lowercase_ , scale=lowercase_ )
if do_normalize:
lowerCAmelCase_ = self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ )
lowerCAmelCase_ = to_channel_dimension_format(lowercase_ , lowercase_ )
return image
def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image:
'''simple docstring'''
lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ = resample if resample is not None else self.resample
lowerCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ = image_std if image_std is not None else self.image_std
lowerCAmelCase_ = size if size is not None else self.size
lowerCAmelCase_ = get_size_dict(lowercase_ , default_to_square=lowercase_ )
lowerCAmelCase_ = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase_ = get_size_dict(lowercase_ , param_name='crop_size' )
if not valid_images(lowercase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
lowerCAmelCase_ = make_batched(lowercase_ )
lowerCAmelCase_ = [
[
self._preprocess_image(
image=lowercase_ , do_resize=lowercase_ , size=lowercase_ , resample=lowercase_ , do_center_crop=lowercase_ , crop_size=lowercase_ , do_rescale=lowercase_ , rescale_factor=lowercase_ , do_normalize=lowercase_ , image_mean=lowercase_ , image_std=lowercase_ , data_format=lowercase_ , )
for img in video
]
for video in videos
]
lowerCAmelCase_ = {'pixel_values': videos}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
| 371 |
def multiply(a: int, b: int) -> int:
    # Russian-peasant multiplication: O(log b) additions via add-and-shift.
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def multiply_mod(a: int, b: int, c: int) -> int:
    # Same add-and-shift scheme, with the accumulator reduced modulo c each step.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
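# Quick sanity check (illustrative): multiply(6, 7) == 42; multiply_mod(6, 7, 5) == 2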
| 14 | 0 |
import base64


def base64_encode(string: str) -> bytes:
    # Encode UTF-8 text to Base64 bytes (b64encode assumed; the original could
    # equally have used b85encode).
    return base64.b64encode(string.encode('utf-8' ) )


def base64_decode(encoded: bytes) -> str:
    # Decode Base64 bytes back to UTF-8 text.
    return base64.b64decode(encoded ).decode('utf-8' )
if __name__ == "__main__":
lowerCamelCase_ = """Hello World!"""
lowerCamelCase_ = baseaa_encode(test)
print(encoded)
lowerCamelCase_ = baseaa_decode(encoded)
print(decoded)
| 350 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class a_ ( a_ ):
'''simple docstring'''
__a: str = ['''vqvae''']
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ , mel=lowercase_ , vqvae=lowercase_ )
def _lowercase ( self ) -> int:
'''simple docstring'''
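        # 50 steps when the scheduler is DDIM, 1000 for DDPM — each scheduler's
        # customary default.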
        return 5_0 if isinstance(self.scheduler , DDIMScheduler ) else 1_0_0_0
@torch.no_grad()
def __call__( self , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
lowerCAmelCase_ = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowercase_ )
lowerCAmelCase_ = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowerCAmelCase_ = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowerCAmelCase_ = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=lowercase_ , device=self.device , )
lowerCAmelCase_ = noise
lowerCAmelCase_ = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowercase_ , lowercase_ )
lowerCAmelCase_ = self.mel.audio_slice_to_image(lowercase_ )
lowerCAmelCase_ = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
lowerCAmelCase_ = (input_image / 2_5_5) * 2 - 1
lowerCAmelCase_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowerCAmelCase_ = self.vqvae.encode(torch.unsqueeze(lowercase_ , 0 ) ).latent_dist.sample(
generator=lowercase_ )[0]
lowerCAmelCase_ = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , self.scheduler.timesteps[start_step - 1] )
lowerCAmelCase_ = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowerCAmelCase_ = int(mask_start_secs * pixels_per_second )
lowerCAmelCase_ = int(mask_end_secs * pixels_per_second )
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , lowercase_ ):
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ , lowercase_ )['sample']
else:
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample']
if isinstance(self.scheduler , lowercase_ ):
lowerCAmelCase_ = self.scheduler.step(
model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , eta=lowercase_ , generator=lowercase_ , )['prev_sample']
else:
lowerCAmelCase_ = self.scheduler.step(
model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , generator=lowercase_ , )['prev_sample']
if mask is not None:
if mask_start > 0:
lowerCAmelCase_ = mask[:, step, :, :mask_start]
if mask_end > 0:
lowerCAmelCase_ = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
lowerCAmelCase_ = 1 / self.vqvae.config.scaling_factor * images
lowerCAmelCase_ = self.vqvae.decode(lowercase_ )['sample']
lowerCAmelCase_ = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowerCAmelCase_ = (images * 2_5_5).round().astype('uint8' )
        lowerCAmelCase_ = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode='RGB' ).convert('L' ) for _ in images) )
lowerCAmelCase_ = [self.mel.image_to_audio(lowercase_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowercase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase_ ) )
@torch.no_grad()
def _lowercase ( self , lowercase_ , lowercase_ = 5_0 ) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , lowercase_ )
self.scheduler.set_timesteps(lowercase_ )
lowerCAmelCase_ = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
lowerCAmelCase_ = (sample / 2_5_5) * 2 - 1
lowerCAmelCase_ = torch.Tensor(lowercase_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
lowerCAmelCase_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowerCAmelCase_ = self.scheduler.alphas_cumprod[t]
lowerCAmelCase_ = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowerCAmelCase_ = 1 - alpha_prod_t
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample']
lowerCAmelCase_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowerCAmelCase_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowerCAmelCase_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _lowercase ( lowercase_ , lowercase_ , lowercase_ ) -> torch.Tensor:
'''simple docstring'''
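        # Spherical linear interpolation between two flattened tensors; theta is
        # the angle between them, recovered from their normalized dot product.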
lowerCAmelCase_ = acos(torch.dot(torch.flatten(lowercase_ ) , torch.flatten(lowercase_ ) ) / torch.norm(lowercase_ ) / torch.norm(lowercase_ ) )
return sin((1 - alpha) * theta ) * xa / sin(lowercase_ ) + sin(alpha * theta ) * xa / sin(lowercase_ )
| 14 | 0 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCamelCase_ = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
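# Note (added for clarity): config_common_kwargs intentionally sets every common
# `PretrainedConfig` attribute to a non-default value; a test below fails if any
# entry equals the default, so new common kwargs must be added here with
# non-default values.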
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _lowercase ( cls ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = TOKEN
HfFolder.save_token(lowercase_ )
@classmethod
def _lowercase ( cls ) -> Optional[int]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('test-config' , use_auth_token=self._token )
lowerCAmelCase_ = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ , repo_id='test-config' , push_to_hub=lowercase_ , use_auth_token=self._token )
lowerCAmelCase_ = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
lowerCAmelCase_ = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id='valid_org/test-config-org' , push_to_hub=lowercase_ , use_auth_token=self._token )
lowerCAmelCase_ = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_ ) )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCAmelCase_ = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCAmelCase_ = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowercase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCAmelCase_ = c.n_embd + 1 # int
lowerCAmelCase_ = c.resid_pdrop + 1.0 # float
lowerCAmelCase_ = not c.scale_attn_weights # bool
lowerCAmelCase_ = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowercase_ , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(lowercase_ , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(lowercase_ , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowercase_ , c.summary_type , 'mismatch for key: summary_type' )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = PretrainedConfig()
lowerCAmelCase_ = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowercase_ , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCAmelCase_ = [key for key, value in config_common_kwargs.items() if value == getattr(lowercase_ , lowercase_ )]
if len(lowercase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {", ".join(lowercase_ )}.''' )
def _lowercase ( self ) -> int:
'''simple docstring'''
with self.assertRaises(lowercase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCAmelCase_ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCAmelCase_ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(lowercase_ )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = mock.Mock()
lowerCAmelCase_ = 5_0_0
lowerCAmelCase_ = {}
lowerCAmelCase_ = HTTPError
lowerCAmelCase_ = {}
# Download this model to make sure it's in the cache.
lowerCAmelCase_ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=lowercase_ ) as mock_head:
lowerCAmelCase_ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = AutoConfig.from_pretrained('bert-base-cased' )
lowerCAmelCase_ = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowercase_ )
lowerCAmelCase_ = 2
json.dump(configuration.to_dict() , open(os.path.join(lowercase_ , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCAmelCase_ = AutoConfig.from_pretrained(lowercase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCAmelCase_ = ['config.42.0.0.json']
lowerCAmelCase_ = 7_6_8
configuration.save_pretrained(lowercase_ )
shutil.move(os.path.join(lowercase_ , 'config.4.0.0.json' ) , os.path.join(lowercase_ , 'config.42.0.0.json' ) )
lowerCAmelCase_ = AutoConfig.from_pretrained(lowercase_ )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCAmelCase_ = 'v4.0.0'
lowerCAmelCase_ , lowerCAmelCase_ = new_transformers.models.auto.AutoConfig.from_pretrained(
lowercase_ , return_unused_kwargs=lowercase_ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowercase_ , {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCAmelCase_ = 'v3.0.0'
lowerCAmelCase_ = old_transformers.models.auto.AutoConfig.from_pretrained(lowercase_ )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
| 351 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCamelCase_ = logging.get_logger(__name__)
def get_resize_output_image_size(input_image , output_size , keep_aspect_ratio , multiple ) -> Tuple[int, int]:
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height, input_width = get_image_size(input_image )
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
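# Worked example for the helper above (illustrative numbers): a 480x640 input
# resized toward (384, 384) with keep_aspect_ratio=True and multiple=32 keeps
# the scale closest to 1 (height: 384 / 480 = 0.8), applies it to both sides
# (0.8 * 480 = 384, 0.8 * 640 = 512) and snaps each to a multiple of 32,
# giving (384, 512).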
class a_ ( a_ ):
'''simple docstring'''
__a: Union[str, Any] = ['''pixel_values''']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = False , lowercase_ = 1 , lowercase_ = True , lowercase_ = 1 / 2_5_5 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
lowerCAmelCase_ = get_size_dict(lowercase_ )
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = keep_aspect_ratio
lowerCAmelCase_ = ensure_multiple_of
lowerCAmelCase_ = resample
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = rescale_factor
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = False , lowercase_ = 1 , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowerCAmelCase_ = get_resize_output_image_size(
lowercase_ , output_size=(size['height'], size['width']) , keep_aspect_ratio=lowercase_ , multiple=lowercase_ , )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Dict:
'''simple docstring'''
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image:
'''simple docstring'''
lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ = size if size is not None else self.size
lowerCAmelCase_ = get_size_dict(lowercase_ )
lowerCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowerCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowerCAmelCase_ = resample if resample is not None else self.resample
lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ = image_std if image_std is not None else self.image_std
lowerCAmelCase_ = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase_ = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowerCAmelCase_ = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_rescale:
lowerCAmelCase_ = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
lowerCAmelCase_ = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
lowerCAmelCase_ = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowerCAmelCase_ = {'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(lowercase_ ):
lowerCAmelCase_ = target_sizes.numpy()
lowerCAmelCase_ = []
for idx in range(len(lowercase_ ) ):
lowerCAmelCase_ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_ )
lowerCAmelCase_ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase_ )
else:
lowerCAmelCase_ = logits.argmax(dim=1 )
lowerCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 14 | 0 |
import argparse
from collections import defaultdict
import yaml
lowerCamelCase_ = """docs/source/en/_toctree.yml"""
def clean_doc_toc(doc_list ):
    counts = defaultdict(int )
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']} )
        else:
            new_doc_list.append(doc )
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F'''{duplicate_key} is present several times in the documentation table of content at '''
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.' )
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]} )
    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
    new_doc = sorted(new_doc , key=lambda s: s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        raise ValueError('The table of content has two \'overview\' docs, which is not allowed.' )
    overview_doc.extend(new_doc )
    # Sort
    return overview_doc
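# Illustrative example (hypothetical entries): given
#     [{"local": "overview", "title": "Overview"},
#      {"local": "ddim", "title": "DDIM"},
#      {"local": "ddim", "title": "DDIM"},
#      {"local": "adapter", "title": "Adapter"}]
# clean_doc_toc merges the duplicated "ddim" entry, sorts the rest by title,
# and returns the "overview" entry first:
#     [{"local": "overview", "title": "Overview"},
#      {"local": "adapter", "title": "Adapter"},
#      {"local": "ddim", "title": "DDIM"}]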
def check_scheduler_doc(overwrite=False ):
    with open(PATH_TO_TOC , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]['sections']
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def check_pipeline_doc(overwrite=False ):
    with open(PATH_TO_TOC , encoding='utf-8' ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['sections']
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]['sections']
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['section']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc['section'] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['sections'] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC , 'w' , encoding='utf-8' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                'The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 352 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowerCamelCase_ = logging.get_logger(__name__)
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , *lowercase_ , **lowercase_ ) -> None:
'''simple docstring'''
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 14 | 0 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler , num_steps=10 ):
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler , num_steps=10 ):
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , 'schedule.bin' )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
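# Note (added for clarity): unlike `unwrap_schedule`, the helper above saves the
# scheduler's state_dict to disk halfway through and reloads it before stepping
# on, so comparing the two LR traces verifies that schedules serialize and
# resume without drift.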
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for a, b in zip(lowercase_ , lowercase_ ):
self.assertAlmostEqual(lowercase_ , lowercase_ , delta=lowercase_ )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowercase_ )
lowerCAmelCase_ = torch.tensor([0.4, 0.2, -0.5] )
lowerCAmelCase_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowerCAmelCase_ = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(1_0_0 ):
lowerCAmelCase_ = criterion(lowercase_ , lowercase_ )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors, so we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowercase_ )
lowerCAmelCase_ = torch.tensor([0.4, 0.2, -0.5] )
lowerCAmelCase_ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
lowerCAmelCase_ = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowercase_ , weight_decay=0.0 , relative_step=lowercase_ , scale_parameter=lowercase_ , warmup_init=lowercase_ , )
for _ in range(1_0_0_0 ):
lowerCAmelCase_ = criterion(lowercase_ , lowercase_ )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors, so we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
__a: Tuple = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
__a: Any = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
__a: str = 1_0
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for a, b in zip(lowercase_ , lowercase_ ):
self.assertAlmostEqual(lowercase_ , lowercase_ , delta=lowercase_ , msg=lowercase_ )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = {'num_warmup_steps': 2, 'num_training_steps': 1_0}
        # schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
lowerCAmelCase_ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1e-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
lowerCAmelCase_ , lowerCAmelCase_ = data
lowerCAmelCase_ = scheduler_func(self.optimizer , **lowercase_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
lowerCAmelCase_ = unwrap_schedule(lowercase_ , self.num_steps )
self.assertListAlmostEqual(
lowercase_ , lowercase_ , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
lowerCAmelCase_ = scheduler_func(self.optimizer , **lowercase_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(lowercase_ ) # wrap to test picklability of the schedule
lowerCAmelCase_ = unwrap_and_save_reload_schedule(lowercase_ , self.num_steps )
self.assertListEqual(lowercase_ , lowercase_ , msg=f'''failed for {scheduler_func} in save and reload''' )
class a_ :
'''simple docstring'''
def __init__( self , lowercase_ ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = fn
def __call__( self , *lowercase_ , **lowercase_ ) -> List[str]:
'''simple docstring'''
return self.fn(*lowercase_ , **lowercase_ )
@classmethod
def _lowercase ( self , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = list(map(self , scheduler.lr_lambdas ) )
| 353 |
from __future__ import annotations
import queue
class TreeNode:
    '''simple docstring'''
    def __init__( self , data ) -> None:
        '''simple docstring'''
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    print('\n********Press N to stop entering at any point of time********\n' )
    check = input('Enter the value of the root node: ' ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = F'''Enter the left node of {node_found.data}: '''
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = F'''Enter the right node of {node_found.data}: '''
        check = input(msg ).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise RuntimeError('The queue emptied before tree construction finished' )
def pre_order(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    print(node.data , end=',' )
    pre_order(node.left )
    pre_order(node.right )
def in_order(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data , end=',' )
    in_order(node.right )
def post_order(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end=',' )
def level_order(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end=',' )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end=',' )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=',' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end=',' )
        n = n.right
def post_order_iter(node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n )
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(n )
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data , end=',' )
def lowerCamelCase ( a_ = "" , a_=50 , a_="*" ) -> str:
if not s:
return "\n" + width * char
lowerCAmelCase_ , lowerCAmelCase_ = divmod(width - len(a_ ) - 2 , 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
    node = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 5_0 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 14 | 0 |
def prime_sieve_eratosthenes(num ):
    if num <= 0:
        raise ValueError('Input must be a positive integer' )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
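# Example: prime_sieve_eratosthenes(10) returns [2, 3, 5, 7].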
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
| 354 |
import base64


# NOTE: restored from the obfuscated `baseaa.baaencode`; this is assumed to be
# the base85 (b85) codec from the standard library's base64 module.
def baseaa_encode(string: str ) -> bytes:
    return base64.b85encode(string.encode('utf-8' ) )


def baseaa_decode(encoded: bytes ) -> str:
    return base64.b85decode(encoded ).decode('utf-8' )


if __name__ == "__main__":
    test = """Hello World!"""
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
| 14 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class a_ ( a_ ):
'''simple docstring'''
__a: Dict = '''perceiver'''
def __init__( self , lowercase_=2_5_6 , lowercase_=1_2_8_0 , lowercase_=7_6_8 , lowercase_=1 , lowercase_=2_6 , lowercase_=8 , lowercase_=8 , lowercase_=None , lowercase_=None , lowercase_="kv" , lowercase_=1 , lowercase_=1 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=True , lowercase_=2_6_2 , lowercase_=2_0_4_8 , lowercase_=5_6 , lowercase_=[3_6_8, 4_9_6] , lowercase_=1_6 , lowercase_=1_9_2_0 , lowercase_=1_6 , lowercase_=[1, 1_6, 2_2_4, 2_2_4] , **lowercase_ , ) -> Dict:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ = num_latents
lowerCAmelCase_ = d_latents
lowerCAmelCase_ = d_model
lowerCAmelCase_ = num_blocks
lowerCAmelCase_ = num_self_attends_per_block
lowerCAmelCase_ = num_self_attention_heads
lowerCAmelCase_ = num_cross_attention_heads
lowerCAmelCase_ = qk_channels
lowerCAmelCase_ = v_channels
lowerCAmelCase_ = cross_attention_shape_for_attention
lowerCAmelCase_ = self_attention_widening_factor
lowerCAmelCase_ = cross_attention_widening_factor
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = use_query_residual
# masked language modeling attributes
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = max_position_embeddings
# image classification attributes
lowerCAmelCase_ = image_size
# flow attributes
lowerCAmelCase_ = train_size
# multimodal autoencoding attributes
lowerCAmelCase_ = num_frames
lowerCAmelCase_ = audio_samples_per_frame
lowerCAmelCase_ = samples_per_patch
lowerCAmelCase_ = output_shape
class a_ ( a_ ):
'''simple docstring'''
@property
def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
lowerCAmelCase_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase_ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def _lowercase ( self ) -> float:
'''simple docstring'''
return 1e-4
def _lowercase ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , lowercase_ = 3 , lowercase_ = 4_0 , lowercase_ = 4_0 , ) -> Mapping[str, Any]:
'''simple docstring'''
if isinstance(lowercase_ , lowercase_ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase_ = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase_ = preprocessor.num_special_tokens_to_add(lowercase_ )
lowerCAmelCase_ = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase_ = [' '.join(['a'] ) * seq_length] * batch_size
lowerCAmelCase_ = dict(preprocessor(lowercase_ , return_tensors=lowercase_ ) )
lowerCAmelCase_ = inputs.pop('input_ids' )
return inputs
elif isinstance(lowercase_ , lowercase_ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase_ = compute_effective_axis_dimension(lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch )
lowerCAmelCase_ = self._generate_dummy_images(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase_ = dict(preprocessor(images=lowercase_ , return_tensors=lowercase_ ) )
lowerCAmelCase_ = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
| 355 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config , input_ids , attention_mask=None , head_mask=None ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class a_ :
'''simple docstring'''
__a: Tuple = OPTConfig
__a: Optional[Any] = {}
__a: Tuple = '''gelu'''
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=9_9 , lowercase_=1_6 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=2_0 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=1_6 , lowercase_=1_6 , ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = eos_token_id
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = bos_token_id
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = word_embed_proj_dim
lowerCAmelCase_ = False
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase_ , **self.config_updates , )
lowerCAmelCase_ = prepare_opt_inputs_dict(lowercase_ , lowercase_ )
return config, inputs_dict
def _lowercase ( self , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModel(config=lowercase_ )
lowerCAmelCase_ = inputs_dict['input_ids']
lowerCAmelCase_ = input_ids[:1, :]
lowerCAmelCase_ = inputs_dict['attention_mask'][:1, :]
lowerCAmelCase_ = 1
# first forward pass
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
lowerCAmelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )[0]
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
@require_tf
class a_ ( a_ , a_ , unittest.TestCase ):
'''simple docstring'''
__a: Optional[int] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__a: Optional[Any] = (TFOPTForCausalLM,) if is_tf_available() else ()
__a: Union[str, Any] = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
__a: int = False
__a: List[Any] = False
__a: Dict = False
__a: List[Any] = 1_0
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=lowercase_ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowercase_ , lowercase_ ):
if hasattr(lowercase_ , 'weight' ):
return embedding_layer.weight
else:
                # Here we build the word embedding weights if they don't exist yet.
                # Then we retry getting the attribute once the model is built.
model.build()
if hasattr(lowercase_ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
lowerCAmelCase_ = model_class(config=lowercase_ )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase_ )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCAmelCase_ = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowercase_ )
# check that weights remain the same after resizing
lowerCAmelCase_ = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase_ = False
self.assertTrue(lowercase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowercase_ )
lowerCAmelCase_ = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase_ = False
self.assertTrue(lowercase_ )
def _long_tensor(tok_lst ):
    return tf.constant(tok_lst , dtype=tf.int32 )
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
__a: Optional[int] = 9_9
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = tf.ones((4, 1) , dtype=tf.intaa ) * 2
lowerCAmelCase_ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowerCAmelCase_ = input_ids.shape[0]
lowerCAmelCase_ = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModel.from_pretrained('facebook/opt-350m' )
lowerCAmelCase_ = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowerCAmelCase_ = tf.not_equal(lowercase_ , model.config.pad_token_id )
with tf.GradientTape():
lowerCAmelCase_ = model(input_ids=lowercase_ , attention_mask=lowercase_ ).last_hidden_state
lowerCAmelCase_ = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , lowercase_ )
lowerCAmelCase_ = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-3 ) )
lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ )
lowerCAmelCase_ = xla_generate(lowercase_ , lowercase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-2 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
lowerCAmelCase_ = 'facebook/opt-350m'
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(self.path_model )
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ , add_special_tokens=lowercase_ )
lowerCAmelCase_ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowerCAmelCase_ = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ )
lowerCAmelCase_ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-125m'
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCAmelCase_ = []
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-350m'
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
lowerCAmelCase_ = 'left'
# use different length sentences to test batching
lowerCAmelCase_ = [
'Hello, my dog is a little',
'Today, I',
]
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ )
lowerCAmelCase_ = inputs['input_ids']
lowerCAmelCase_ = model.generate(input_ids=lowercase_ , attention_mask=inputs['attention_mask'] )
lowerCAmelCase_ = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(input_ids=lowercase_ )
lowerCAmelCase_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
lowerCAmelCase_ = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-350m'
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCAmelCase_ = []
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
| 14 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
@torch.no_grad()
def __call__( self , lowercase_ = 1 , lowercase_ = 1_0_0 , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if audio_length_in_s is None:
lowerCAmelCase_ = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCAmelCase_ = audio_length_in_s * self.unet.config.sample_rate
lowerCAmelCase_ = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
                f'''{audio_length_in_s} is too small. Make sure it\'s bigger than or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
lowerCAmelCase_ = int(lowercase_ )
if sample_size % down_scale_factor != 0:
lowerCAmelCase_ = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
' process.' )
lowerCAmelCase_ = int(lowercase_ )
lowerCAmelCase_ = next(iter(self.unet.parameters() ) ).dtype
lowerCAmelCase_ = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCAmelCase_ = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
# set step values
self.scheduler.set_timesteps(lowercase_ , device=audio.device )
lowerCAmelCase_ = self.scheduler.timesteps.to(lowercase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ ).sample
            # 2. compute previous image: x_t -> x_t-1
lowerCAmelCase_ = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
lowerCAmelCase_ = audio.clamp(-1 , 1 ).float().cpu().numpy()
lowerCAmelCase_ = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowercase_ )
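# Hedged usage sketch (the pipeline class and checkpoint names below are
# assumptions, not taken from this file):
#     pipe = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k')
#     audio = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios[0]
# The pipeline pads the requested length up to a multiple the UNet can handle
# and trims the generated waveform back to the originally requested sample
# count before returning it.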
| 356 |
MOD_ADLER = 6_5_5_2_1


def adler32(plain_text: str ) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
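# Known Adler-32 test vector: adler32('Wikipedia') == 300286872 (0x11E60398).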
| 14 | 0 |
def check_cycle(graph: dict ) -> bool:
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )


def depth_first_search(graph: dict , vertex: int , visited: set , rec_stk: set ) -> bool:
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
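# Example: check_cycle({0: [1], 1: [2], 2: [0]}) is True (back edge 2 -> 0),
# while check_cycle({0: [1], 1: [2], 2: []}) is False.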
if __name__ == "__main__":
from doctest import testmod
testmod()
| 357 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def rename_keys(state_dict , encoder_only=False ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith('head' ):
            key = 'segformer.encoder.' + key
        if key.startswith('backbone' ):
            key = key.replace('backbone' , 'segformer.encoder' )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed' ) + len('patch_embed' )]
            key = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(idx )-1}''' )
        if "norm" in key:
            key = key.replace('norm' , 'layer_norm' )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
            key = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(idx )-1}''' )
        if "layer_norm1" in key:
            key = key.replace('layer_norm1' , 'layer_norm_1' )
        if "layer_norm2" in key:
            key = key.replace('layer_norm2' , 'layer_norm_2' )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block' ) + len('block' )]
            key = key.replace(F'''block{idx}''' , F'''block.{int(idx )-1}''' )
        if "attn.q" in key:
            key = key.replace('attn.q' , 'attention.self.query' )
        if "attn.proj" in key:
            key = key.replace('attn.proj' , 'attention.output.dense' )
        if "attn" in key:
            key = key.replace('attn' , 'attention.self' )
        if "fc1" in key:
            key = key.replace('fc1' , 'dense1' )
        if "fc2" in key:
            key = key.replace('fc2' , 'dense2' )
        if "linear_pred" in key:
            key = key.replace('linear_pred' , 'classifier' )
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv' , 'linear_fuse' )
            key = key.replace('linear_fuse.bn' , 'batch_norm' )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c' ) + len('linear_c' )]
            key = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(idx )-1}''' )
        if key.startswith('head' ):
            key = key.replace('head' , 'classifier' )
        new_state_dict[key] = value
    return new_state_dict
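# Illustrative example: rename_keys maps an original checkpoint key such as
# 'backbone.patch_embed1.proj.weight' to
# 'segformer.encoder.patch_embeddings.0.proj.weight'.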
def lowerCamelCase ( a_ , a_ ) -> Union[str, Any]:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase_ = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase_ = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase_ = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase_ = kv_bias[
config.hidden_sizes[i] :
]
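# A small sketch of the key/value split performed above (hypothetical sizes):
# the original checkpoint fuses K and V into one (2 * hidden, hidden) matrix;
# the first `hidden` rows become the key projection, the rest the value
# projection.
def _kv_split_sketch(hidden: int = 4):
    kv_weight = torch.randn(2 * hidden, hidden)
    kv_bias = torch.randn(2 * hidden)
    key_w, value_w = kv_weight[:hidden, :], kv_weight[hidden:, :]
    key_b, value_b = kv_bias[:hidden], kv_bias[hidden:]
    assert key_w.shape == value_w.shape == (hidden, hidden)
    return (key_w, key_b), (value_w, value_b)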
def lowerCamelCase ( ) -> Optional[int]:
lowerCAmelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase_ = Image.open(requests.get(a_ , stream=a_ ).raw )
return image
@torch.no_grad()
def lowerCamelCase ( a_ , a_ , a_ ) -> int:
lowerCAmelCase_ = SegformerConfig()
lowerCAmelCase_ = False
# set attributes based on model_name
lowerCAmelCase_ = 'huggingface/label-files'
if "segformer" in model_name:
lowerCAmelCase_ = model_name[len('segformer.' ) : len('segformer.' ) + 2]
if "ade" in model_name:
lowerCAmelCase_ = 150
lowerCAmelCase_ = 'ade20k-id2label.json'
lowerCAmelCase_ = (1, 150, 128, 128)
elif "city" in model_name:
lowerCAmelCase_ = 19
lowerCAmelCase_ = 'cityscapes-id2label.json'
lowerCAmelCase_ = (1, 19, 128, 128)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
lowerCAmelCase_ = True
lowerCAmelCase_ = model_name[4:6]
lowerCAmelCase_ = 1_000
lowerCAmelCase_ = 'imagenet-1k-id2label.json'
lowerCAmelCase_ = (1, 1_000)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
lowerCAmelCase_ = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset' ) , 'r' ) )
lowerCAmelCase_ = {int(a_ ): v for k, v in idalabel.items()}
lowerCAmelCase_ = idalabel
lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 256
elif size == "b2":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 4, 6, 3]
elif size == "b3":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 4, 18, 3]
elif size == "b4":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 8, 27, 3]
elif size == "b5":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
lowerCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=a_ , align=a_ , do_random_crop=a_ )
# prepare image
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=a_ , return_tensors='pt' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )
else:
lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )['state_dict']
# rename keys
lowerCAmelCase_ = rename_keys(a_ , encoder_only=a_ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(a_ , a_ )
# create HuggingFace model and load state dict
if encoder_only:
lowerCAmelCase_ = False
lowerCAmelCase_ = SegformerForImageClassification(a_ )
else:
lowerCAmelCase_ = SegformerForSemanticSegmentation(a_ )
model.load_state_dict(a_ )
model.eval()
# forward pass
lowerCAmelCase_ = model(a_ )
lowerCAmelCase_ = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
else:
lowerCAmelCase_ = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
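# A tiny sketch of the tolerance check used above (values are made up): with
# atol=1e-2, two tensors match if they differ by at most ~0.01 elementwise.
def _allclose_sketch() -> bool:
    return torch.allclose(torch.tensor([1.000, 2.000]), torch.tensor([1.005, 1.996]), atol=1e-2)  # True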
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowerCamelCase_ = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 14 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCamelCase_ = """platform"""
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def lowerCamelCase ( a_ , a_ , a_=None , a_=None , a_=None , a_=None , a_=None , a_=None , ) -> int:
if attention_mask is None:
lowerCAmelCase_ = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
lowerCAmelCase_ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
lowerCAmelCase_ = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCAmelCase_ = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
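# A quick sketch of the mask construction above with toy ids (pad_token_id = 1):
def _pad_mask_sketch():
    toy_ids = np.array([[5, 6, 7, 1, 1]])
    return np.where(toy_ids != 1, 1, 0)  # -> [[1, 1, 1, 0, 0]]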
class a_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=9_9 , lowercase_=1_6 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=3_2 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=0.02 , ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = eos_token_id
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = bos_token_id
lowerCAmelCase_ = initializer_range
def _lowercase ( self ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
lowerCAmelCase_ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
lowerCAmelCase_ = shift_tokens_right(lowercase_ , 1 , 2 )
lowerCAmelCase_ = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowercase_ , )
lowerCAmelCase_ = prepare_blenderbot_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
return config, inputs_dict
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = 2_0
lowerCAmelCase_ = model_class_name(lowercase_ )
lowerCAmelCase_ = model.encode(inputs_dict['input_ids'] )
lowerCAmelCase_ , lowerCAmelCase_ = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
lowerCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_ )
lowerCAmelCase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
lowerCAmelCase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase_ = model.decode(
decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , )
lowerCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
lowerCAmelCase_ = model.decode(
decoder_input_ids[:, -1:] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase_ , )
lowerCAmelCase_ = model.decode(lowercase_ , lowercase_ )
lowerCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = 2_0
lowerCAmelCase_ = model_class_name(lowercase_ )
lowerCAmelCase_ = model.encode(inputs_dict['input_ids'] )
lowerCAmelCase_ , lowerCAmelCase_ = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
lowerCAmelCase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
lowerCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_ )
lowerCAmelCase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
lowerCAmelCase_ = model.decode(
decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , )
lowerCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
lowerCAmelCase_ = model.decode(
decoder_input_ids[:, -1:] , lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase_ , decoder_position_ids=lowercase_ , )
lowerCAmelCase_ = model.decode(lowercase_ , lowercase_ , decoder_attention_mask=lowercase_ )
lowerCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
__a: str = 9_9
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
lowerCAmelCase_ = input_ids.shape[0]
lowerCAmelCase_ = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_config_and_data()
lowerCAmelCase_ = FlaxBlenderbotSmallForConditionalGeneration(lowercase_ )
lowerCAmelCase_ = lm_model(input_ids=lowercase_ )
lowerCAmelCase_ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , lowercase_ )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
lowerCAmelCase_ = FlaxBlenderbotSmallForConditionalGeneration(lowercase_ )
lowerCAmelCase_ = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
lowerCAmelCase_ = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
lowerCAmelCase_ = lm_model(input_ids=lowercase_ , decoder_input_ids=lowercase_ )
lowerCAmelCase_ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , lowercase_ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
lowerCAmelCase_ = shift_tokens_right(lowercase_ , 1 , 2 )
lowerCAmelCase_ = np.equal(lowercase_ , 1 ).astype(np.floataa ).sum()
lowerCAmelCase_ = np.equal(lowercase_ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowercase_ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class a_ ( a_ , unittest.TestCase , a_ ):
'''simple docstring'''
__a: List[str] = True
__a: Dict = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
__a: Optional[Any] = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = FlaxBlenderbotSmallModelTester(self )
def _lowercase ( self ) -> Any:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase_ , lowercase_ , lowercase_ )
def _lowercase ( self ) -> Any:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase_ , lowercase_ , lowercase_ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase_ = self._prepare_for_class(lowercase_ , lowercase_ )
lowerCAmelCase_ = model_class(lowercase_ )
@jax.jit
def encode_jitted(lowercase_ , lowercase_=None , **lowercase_ ):
return model.encode(input_ids=lowercase_ , attention_mask=lowercase_ )
with self.subTest('JIT Enabled' ):
lowerCAmelCase_ = encode_jitted(**lowercase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCAmelCase_ = encode_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase_ = model_class(lowercase_ )
lowerCAmelCase_ = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
lowerCAmelCase_ = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase_ , lowercase_ , lowercase_ ):
return model.decode(
decoder_input_ids=lowercase_ , decoder_attention_mask=lowercase_ , encoder_outputs=lowercase_ , )
with self.subTest('JIT Enabled' ):
lowerCAmelCase_ = decode_jitted(**lowercase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCAmelCase_ = decode_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCAmelCase_ = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCAmelCase_ = np.ones((1, 1) ) * model.config.eos_token_id
lowerCAmelCase_ = model(lowercase_ )
self.assertIsNotNone(lowercase_ )
| 358 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class a_ ( a_ , a_ ):
'''simple docstring'''
__a: Optional[Any] = '''nat'''
__a: int = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , lowercase_=4 , lowercase_=3 , lowercase_=6_4 , lowercase_=[3, 4, 6, 5] , lowercase_=[2, 4, 8, 1_6] , lowercase_=7 , lowercase_=3.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=0.02 , lowercase_=1e-5 , lowercase_=0.0 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = depths
lowerCAmelCase_ = len(lowercase_ )
lowerCAmelCase_ = num_heads
lowerCAmelCase_ = kernel_size
lowerCAmelCase_ = mlp_ratio
lowerCAmelCase_ = qkv_bias
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = drop_path_rate
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase_ = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
lowerCAmelCase_ = layer_scale_init_value
lowerCAmelCase_ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase_ ) + 1 )]
lowerCAmelCase_ , lowerCAmelCase_ = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
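# Worked example of the hidden_size computation above, using the defaults:
# embed_dim = 64 and four stages, so the channel count doubles three times.
assert int(64 * 2 ** (len([3, 4, 6, 5]) - 1)) == 512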
| 14 | 0 |
lowerCamelCase_ = """
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 359 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCamelCase_ = """pytorch_model.bin"""
lowerCamelCase_ = """pytorch_model.bin.index.json"""
lowerCamelCase_ = """adapter_config.json"""
lowerCamelCase_ = """adapter_model.bin"""
lowerCamelCase_ = """adapter_model.safetensors"""
lowerCamelCase_ = """tf_model.h5"""
lowerCamelCase_ = """tf_model.h5.index.json"""
lowerCamelCase_ = """model.ckpt"""
lowerCamelCase_ = """flax_model.msgpack"""
lowerCamelCase_ = """flax_model.msgpack.index.json"""
lowerCamelCase_ = """model.safetensors"""
lowerCamelCase_ = """model.safetensors.index.json"""
lowerCamelCase_ = """config.json"""
lowerCamelCase_ = """preprocessor_config.json"""
lowerCamelCase_ = FEATURE_EXTRACTOR_NAME
lowerCamelCase_ = """generation_config.json"""
lowerCamelCase_ = """modelcard.json"""
lowerCamelCase_ = """▁"""
lowerCamelCase_ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCamelCase_ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCamelCase_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCamelCase_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def lowerCamelCase ( a_ ) -> Dict:
if version.parse(a_ ) < version.parse(a_ ):
if "dev" in min_version:
lowerCAmelCase_ = (
'This example requires a source install from HuggingFace Transformers (see '
'`https://huggingface.co/docs/transformers/installation#install-from-source`),'
)
else:
lowerCAmelCase_ = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
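# A minimal sketch of the version gate above (hypothetical helper name,
# reusing the `version` import from `packaging` at the top of this file):
def _require_at_least(min_version: str, current: str) -> None:
    if version.parse(current) < version.parse(min_version):
        raise ImportError(f"This requires transformers >= {min_version}, found {current}.")

# _require_at_least("4.0.0", "4.21.0") passes; _require_at_least("5.0.0", "4.21.0") raises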
| 14 | 0 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
lowerCamelCase_ = trt.Logger(trt.Logger.WARNING)
lowerCamelCase_ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowerCamelCase_ = logging.getLogger(__name__)
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=3_8_4,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=1_2_8,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=2_0,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=3_0,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=4_2, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
lowerCamelCase_ = parser.parse_args()
if args.tokenizer_name:
lowerCamelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
lowerCamelCase_ = args.per_device_eval_batch_size
lowerCamelCase_ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
lowerCamelCase_ = True
lowerCamelCase_ = """temp_engine/bert-fp32.engine"""
if args.fpaa:
lowerCamelCase_ = """temp_engine/bert-fp16.engine"""
if args.inta:
lowerCamelCase_ = """temp_engine/bert-int8.engine"""
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
lowerCamelCase_ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
lowerCamelCase_ = [network.get_input(i) for i in range(network.num_inputs)]
lowerCamelCase_ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
lowerCamelCase_ = 1 << 5_0
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
lowerCamelCase_ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
lowerCamelCase_ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def lowerCamelCase ( a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ ):
lowerCAmelCase_ = np.asarray(inputs['input_ids'] , dtype=np.intaa )
lowerCAmelCase_ = np.asarray(inputs['attention_mask'] , dtype=np.intaa )
lowerCAmelCase_ = np.asarray(inputs['token_type_ids'] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , a_ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , a_ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , a_ )
# start time
lowerCAmelCase_ = time.time()
# Run inference
context.execute_async(
bindings=[int(a_ ) for d_inp in d_inputs] + [int(a_ ), int(a_ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(a_ , a_ , a_ )
cuda.memcpy_dtoh_async(a_ , a_ , a_ )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowerCAmelCase_ = time.time()
lowerCAmelCase_ = end_time - start_time
lowerCAmelCase_ = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowerCamelCase_ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase_ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
lowerCamelCase_ = raw_datasets["""validation"""].column_names
lowerCamelCase_ = """question""" if """question""" in column_names else column_names[0]
lowerCamelCase_ = """context""" if """context""" in column_names else column_names[1]
lowerCamelCase_ = """answers""" if """answers""" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
lowerCamelCase_ = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
        f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the '''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
lowerCamelCase_ = min(args.max_seq_length, tokenizer.model_max_length)
def lowerCamelCase ( a_ ):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
lowerCAmelCase_ = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
lowerCAmelCase_ = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='only_second' if pad_on_right else 'only_first' , max_length=a_ , stride=args.doc_stride , return_overflowing_tokens=a_ , return_offsets_mapping=a_ , padding='max_length' , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowerCAmelCase_ = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowerCAmelCase_ = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowerCAmelCase_ = tokenized_examples.sequence_ids(a_ )
lowerCAmelCase_ = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowerCAmelCase_ = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowerCAmelCase_ = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
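# Toy illustration of `overflow_to_sample_mapping` used above: a long context
# yields several overlapping features, each pointing back to its source example.
def _overflow_mapping_sketch():
    sample_mapping = [0, 1, 1]  # features 1 and 2 both come from example 1
    example_ids = ["q0", "q1"]
    return [example_ids[s] for s in sample_mapping]  # -> ["q0", "q1", "q1"]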
lowerCamelCase_ = raw_datasets["""validation"""]
# Validation Feature Creation
lowerCamelCase_ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="""Running tokenizer on validation dataset""",
)
lowerCamelCase_ = default_data_collator
lowerCamelCase_ = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""])
lowerCamelCase_ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowerCamelCase ( a_ , a_ , a_ , a_="eval" ):
# Post-processing: we match the start logits and end logits to answers in the original context.
lowerCAmelCase_ = postprocess_qa_predictions(
examples=a_ , features=a_ , predictions=a_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=a_ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCAmelCase_ = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
lowerCAmelCase_ = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
lowerCAmelCase_ = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=a_ , label_ids=a_ )
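# Toy of the two record formats produced above for the squad metric (made-up
# ids, answer text, and answer_start offset):
def _metric_format_sketch():
    formatted_predictions = [{"id": "q0", "prediction_text": "Denver Broncos"}]
    references = [{"id": "q0", "answers": {"text": ["Denver Broncos"], "answer_start": [177]}}]
    return formatted_predictions, references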
lowerCamelCase_ = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowerCamelCase ( a_ ):
return trt.volume(engine.get_binding_shape(a_ ) ) * engine.get_binding_dtype(a_ ).itemsize
# Allocate device memory for inputs and outputs.
lowerCamelCase_ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
lowerCamelCase_ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
lowerCamelCase_ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
lowerCamelCase_ = cuda.mem_alloc(h_outputa.nbytes)
lowerCamelCase_ = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
lowerCamelCase_ = cuda.Stream()
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
lowerCamelCase_ = 0.0
lowerCamelCase_ = 0
lowerCamelCase_ = timeit.default_timer()
lowerCamelCase_ = None
for step, batch in enumerate(eval_dataloader):
lowerCamelCase_ , lowerCamelCase_ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
lowerCamelCase_ , lowerCamelCase_ = outputs
lowerCamelCase_ = torch.tensor(start_logits)
lowerCamelCase_ = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
lowerCamelCase_ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_0_0)
lowerCamelCase_ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_0_0)
lowerCamelCase_ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
lowerCamelCase_ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_0_0)
if all_preds is not None:
lowerCamelCase_ = nested_truncate(all_preds, len(eval_dataset))
lowerCamelCase_ = timeit.default_timer() - start_time
logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1_0_0_0 / niter))
logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1_0_0_0))
logger.info("""Total Number of Inference = %d""", niter)
lowerCamelCase_ = post_processing_function(eval_examples, eval_dataset, all_preds)
lowerCamelCase_ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
| 360 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCamelCase ( a_ ) -> List[str]:
if isinstance(a_ , torch.Tensor ):
return image
elif isinstance(a_ , PIL.Image.Image ):
lowerCAmelCase_ = [image]
lowerCAmelCase_ = [trans(img.convert('RGB' ) ) for img in image]
lowerCAmelCase_ = torch.stack(a_ )
return image
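# Quick check of the preprocessing above (requires Pillow and torchvision;
# image contents are arbitrary): a single RGB image becomes a
# (1, 3, 256, 256) tensor with pixel values in [-1, 1].
def _preprocess_sketch():
    tfs = transforms.Compose(
        [
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5]),
        ]
    )
    img = PIL.Image.new("RGB", (64, 64), color=(128, 128, 128))
    batch = torch.stack([tfs(img)])
    assert batch.shape == (1, 3, 256, 256)
    return batch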
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCAmelCase_ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
def _lowercase ( self , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = min(int(num_inference_steps * strength ) , lowercase_ )
lowerCAmelCase_ = max(num_inference_steps - init_timestep , 0 )
lowerCAmelCase_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Tuple:
'''simple docstring'''
if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}''' )
lowerCAmelCase_ = image.to(device=lowercase_ , dtype=lowercase_ )
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCAmelCase_ = init_latents.shape
lowerCAmelCase_ = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
# get latents
print('add noise to latents at timestep' , lowercase_ )
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase_ = init_latents
return latents
@torch.no_grad()
def __call__( self , lowercase_ = None , lowercase_ = 0.8 , lowercase_ = 1 , lowercase_ = None , lowercase_ = 0.0 , lowercase_ = 5_0 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowercase_ )
# 2. Preprocess image
lowerCAmelCase_ = preprocess(lowercase_ )
# 3. set timesteps
self.scheduler.set_timesteps(lowercase_ , device=self.device )
lowerCAmelCase_ , lowerCAmelCase_ = self.get_timesteps(lowercase_ , lowercase_ , self.device )
lowerCAmelCase_ = timesteps[:1].repeat(lowercase_ )
# 4. Prepare latent variables
lowerCAmelCase_ = self.prepare_latents(lowercase_ , lowercase_ , lowercase_ , self.unet.dtype , self.device , lowercase_ )
lowerCAmelCase_ = latents
# 5. Denoising loop
for t in self.progress_bar(lowercase_ ):
# 1. predict noise model_output
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCAmelCase_ = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_ , ).prev_sample
lowerCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase_ = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowercase_ )
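# Worked example of `get_timesteps` above: `strength` controls how much of the
# schedule actually runs for image-to-image (numbers are illustrative).
# With num_inference_steps = 50 and strength = 0.8:
#   init_timestep = min(int(50 * 0.8), 50) = 40
#   t_start       = max(50 - 40, 0)        = 10
# so only the last 40 scheduler timesteps are used for denoising.
assert min(int(50 * 0.8), 50) == 40 and max(50 - 40, 0) == 10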
| 14 | 0 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCamelCase_ = logging.get_logger(__name__)
class a_ ( a_ ):
'''simple docstring'''
__a: List[Any] = ['''input_values''', '''attention_mask''']
def __init__( self , lowercase_ = 1 , lowercase_ = 1_6_0_0_0 , lowercase_ = 0.0 , lowercase_ = False , lowercase_ = 8_0 , lowercase_ = 1_6 , lowercase_ = 6_4 , lowercase_ = "hann_window" , lowercase_ = 1.0 , lowercase_ = 8_0 , lowercase_ = 7_6_0_0 , lowercase_ = 1e-10 , lowercase_ = 2 , lowercase_ = True , **lowercase_ , ) -> Dict:
'''simple docstring'''
super().__init__(feature_size=lowercase_ , sampling_rate=lowercase_ , padding_value=lowercase_ , **lowercase_ )
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = return_attention_mask
lowerCAmelCase_ = num_mel_bins
lowerCAmelCase_ = hop_length
lowerCAmelCase_ = win_length
lowerCAmelCase_ = win_function
lowerCAmelCase_ = frame_signal_scale
lowerCAmelCase_ = fmin
lowerCAmelCase_ = fmax
lowerCAmelCase_ = mel_floor
lowerCAmelCase_ = reduction_factor
lowerCAmelCase_ = win_length * sampling_rate // 1_0_0_0
lowerCAmelCase_ = hop_length * sampling_rate // 1_0_0_0
lowerCAmelCase_ = optimal_fft_length(self.sample_size )
lowerCAmelCase_ = (self.n_fft // 2) + 1
lowerCAmelCase_ = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowercase_ )
lowerCAmelCase_ = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , )
if frame_signal_scale != 1.0:
warnings.warn(
'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , lowercase_ , )
if reduction_factor != 2.0:
warnings.warn(
'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , lowercase_ , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _lowercase ( lowercase_ , lowercase_ , lowercase_ = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
lowerCAmelCase_ = np.array(lowercase_ , np.intaa )
lowerCAmelCase_ = []
for vector, length in zip(lowercase_ , attention_mask.sum(-1 ) ):
lowerCAmelCase_ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
lowerCAmelCase_ = padding_value
normed_input_values.append(lowercase_ )
else:
lowerCAmelCase_ = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def _lowercase ( self , lowercase_ , ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ = spectrogram(
lowercase_ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , )
return log_mel_spec.T
def __call__( self , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> BatchFeature:
'''simple docstring'''
if audio is None and audio_target is None:
raise ValueError('You must provide either `audio` or `audio_target` values.' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if audio is not None:
lowerCAmelCase_ = self._process_audio(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ , )
else:
lowerCAmelCase_ = None
if audio_target is not None:
lowerCAmelCase_ = self._process_audio(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ , )
if inputs is None:
return inputs_target
else:
lowerCAmelCase_ = inputs_target['input_values']
lowerCAmelCase_ = inputs_target.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase_ = decoder_attention_mask
return inputs
def _lowercase ( self , lowercase_ , lowercase_ = False , lowercase_ = False , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> BatchFeature:
'''simple docstring'''
lowerCAmelCase_ = isinstance(lowercase_ , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase_ = is_batched_numpy or (
isinstance(lowercase_ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase_ = [np.asarray(lowercase_ , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(lowercase_ , np.ndarray ):
lowerCAmelCase_ = np.asarray(lowercase_ , dtype=np.floataa )
elif isinstance(lowercase_ , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ = speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase_ = [speech]
# needed to make pad() work on spectrogram inputs
lowerCAmelCase_ = self.feature_size
# convert into correct format for padding
if is_target:
lowerCAmelCase_ = [self._extract_mel_features(lowercase_ ) for waveform in speech]
lowerCAmelCase_ = BatchFeature({'input_values': features} )
lowerCAmelCase_ = self.num_mel_bins
else:
lowerCAmelCase_ = BatchFeature({'input_values': speech} )
lowerCAmelCase_ = self.pad(
lowercase_ , padding=lowercase_ , max_length=lowercase_ , truncation=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
lowerCAmelCase_ = feature_size_hack
# convert input values to correct format
lowerCAmelCase_ = padded_inputs['input_values']
if not isinstance(input_values[0] , np.ndarray ):
lowerCAmelCase_ = [np.asarray(lowercase_ , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(lowercase_ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
lowerCAmelCase_ = [array.astype(np.floataa ) for array in input_values]
elif isinstance(lowercase_ , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ = input_values.astype(np.floataa )
# convert attention_mask to correct format
lowerCAmelCase_ = padded_inputs.get('attention_mask' )
if attention_mask is not None:
lowerCAmelCase_ = [np.asarray(lowercase_ , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
lowerCAmelCase_ = (
attention_mask
if self._get_padding_strategies(lowercase_ , max_length=lowercase_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowerCAmelCase_ = self.zero_mean_unit_var_norm(
padded_inputs['input_values'] , attention_mask=lowercase_ , padding_value=self.padding_value )
if return_tensors is not None:
lowerCAmelCase_ = padded_inputs.convert_to_tensors(lowercase_ )
return padded_inputs
def _lowercase ( self ) -> Dict[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = super().to_dict()
# Don't serialize these as they are derived from the other properties.
lowerCAmelCase_ = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs']
for name in names:
if name in output:
del output[name]
return output
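# A standalone sketch (my addition, not the library API) of the per-utterance
# zero-mean / unit-variance normalization implemented by the static method above:
# statistics are computed over the non-padded prefix of each waveform only.
import numpy as np

def normalize_batch(waveforms, lengths, padding_value=0.0):
    normed = []
    for vector, length in zip(waveforms, lengths):
        out = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
        out[length:] = padding_value  # padded tail stays at the padding value
        normed.append(out)
    return normed

batch = [np.array([1.0, 2.0, 3.0, 4.0, 0.0]), np.array([5.0, 7.0, 0.0, 0.0, 0.0])]
print(normalize_batch(batch, lengths=[4, 2]))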
| 361 |
def lowerCamelCase ( a_ ) -> "list[int]":
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
lowerCAmelCase_ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
lowerCAmelCase_ = 1
if upper_limit > 0:
lowerCAmelCase_ = 1
    # Recurrence relation: C(i) = sum(C(j) * C(i-j-1)) for j = 0 to i-1
for i in range(2 , upper_limit + 1 ):
for j in range(a_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
lowerCamelCase_ = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
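# Cross-check (my addition): the DP above should match the closed form
# C(n) = binomial(2n, n) / (n + 1).
from math import comb

def catalan_closed_form(n: int) -> int:
    return comb(2 * n, n) // (n + 1)

print([catalan_closed_form(i) for i in range(6)])  # [1, 1, 2, 5, 14, 42]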
| 14 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowerCamelCase_ = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
lowerCamelCase_ = {
"""facebook/blenderbot_small-90M""": 5_1_2,
}
class a_ ( a_ ):
'''simple docstring'''
__a: Optional[int] = VOCAB_FILES_NAMES
__a: List[str] = PRETRAINED_VOCAB_FILES_MAP
__a: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a: List[str] = BlenderbotSmallTokenizer
def __init__( self , lowercase_=None , lowercase_=None , lowercase_="<|endoftext|>" , lowercase_="<|endoftext|>" , lowercase_="<|endoftext|>" , lowercase_=False , lowercase_=True , **lowercase_ , ) -> Any:
'''simple docstring'''
super().__init__(
ByteLevelBPETokenizer(
vocab=lowercase_ , merges=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ , ) , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , **lowercase_ , )
lowerCAmelCase_ : List[Any] = add_prefix_space
def _lowercase ( self , lowercase_ , lowercase_=None ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
lowerCAmelCase_ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
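# A minimal standalone sketch of the special-token layout built above (token ids
# are illustrative assumptions): single sequences become [BOS] ... [EOS], pairs
# become [BOS] a [EOS] [EOS] b [EOS], with all-zero token type ids.
BOS, EOS = 1, 2  # hypothetical ids

def with_special_tokens(a, b=None):
    out = [BOS] + a + [EOS]
    return out if b is None else out + [EOS] + b + [EOS]

print(with_special_tokens([10, 11]))        # [1, 10, 11, 2]
print(with_special_tokens([10], [20, 21]))  # [1, 10, 2, 2, 20, 21, 2]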
| 362 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(a_ )
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , *lowercase_ , **lowercase_ ) -> Any:
'''simple docstring'''
super().__init__(*lowercase_ , **lowercase_ )
self.check_model_type(lowercase_ )
def _lowercase ( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = {}, {}
if padding is not None:
lowerCAmelCase_ = padding
if truncation is not None:
lowerCAmelCase_ = truncation
if top_k is not None:
lowerCAmelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowercase_ , lowercase_ = None , **lowercase_ ) -> int:
'''simple docstring'''
if isinstance(lowercase_ , (Image.Image, str) ) and isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase_ = {'image': image, 'question': question}
else:
lowerCAmelCase_ = image
lowerCAmelCase_ = super().__call__(lowercase_ , **lowercase_ )
return results
def _lowercase ( self , lowercase_ , lowercase_=False , lowercase_=False ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = load_image(inputs['image'] )
lowerCAmelCase_ = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=lowercase_ , truncation=lowercase_ )
lowerCAmelCase_ = self.image_processor(images=lowercase_ , return_tensors=self.framework )
model_inputs.update(lowercase_ )
return model_inputs
def _lowercase ( self , lowercase_ ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = self.model(**lowercase_ )
return model_outputs
def _lowercase ( self , lowercase_ , lowercase_=5 ) -> Any:
'''simple docstring'''
if top_k > self.model.config.num_labels:
lowerCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
lowerCAmelCase_ = model_outputs.logits.sigmoid()[0]
lowerCAmelCase_ , lowerCAmelCase_ = probs.topk(lowercase_ )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
lowerCAmelCase_ = scores.tolist()
lowerCAmelCase_ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
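# Hedged usage sketch: "visual-question-answering" is the real task string, but
# the image path below is an assumption for illustration.
from transformers import pipeline

vqa = pipeline("visual-question-answering")  # loads a default VQA checkpoint
print(vqa(image="cats.png", question="How many cats are there?", top_k=2))
# -> a list of {"score": ..., "answer": ...} dicts, as built in postprocess above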
| 14 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase_ : Optional[int] = 5_0_0_0_0_0
lowerCamelCase_ , lowerCamelCase_ : Dict = os.path.split(__file__)
lowerCamelCase_ : Any = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def lowerCamelCase ( a_ , **a_ ) -> Union[str, Any]:
lowerCAmelCase_ = dataset.map(**a_ )
@get_duration
def lowerCamelCase ( a_ , **a_ ) -> List[str]:
lowerCAmelCase_ = dataset.filter(**a_ )
def lowerCamelCase ( ) -> Any:
lowerCAmelCase_ = {'num examples': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} )
lowerCAmelCase_ = generate_example_dataset(
os.path.join(a_ , 'dataset.arrow' ) , a_ , num_examples=a_ )
lowerCAmelCase_ = transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=a_ )
def tokenize(a_ ):
return tokenizer(examples['text'] )
lowerCAmelCase_ = map(a_ )
lowerCAmelCase_ = map(a_ , batched=a_ )
lowerCAmelCase_ = map(a_ , function=lambda a_ : None , batched=a_ )
with dataset.formatted_as(type='numpy' ):
lowerCAmelCase_ = map(a_ , function=lambda a_ : None , batched=a_ )
with dataset.formatted_as(type='pandas' ):
lowerCAmelCase_ = map(a_ , function=lambda a_ : None , batched=a_ )
with dataset.formatted_as(type='torch' , columns='numbers' ):
lowerCAmelCase_ = map(a_ , function=lambda a_ : None , batched=a_ )
with dataset.formatted_as(type='tensorflow' , columns='numbers' ):
lowerCAmelCase_ = map(a_ , function=lambda a_ : None , batched=a_ )
lowerCAmelCase_ = map(a_ , function=a_ , batched=a_ )
lowerCAmelCase_ = filter(a_ )
    # Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(a_ , 'wb' ) as f:
f.write(json.dumps(a_ ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
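# `get_duration` comes from a local utils module; a plausible minimal sketch
# (an assumption, not the actual benchmark helper) returns the wall-clock time
# of the wrapped call:
import functools
import time

def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start
    return wrapper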
| 363 |
def lowerCamelCase ( a_ ) -> bool:
lowerCAmelCase_ = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
lowerCAmelCase_ = set()
return any(
node not in visited and depth_first_search(a_ , a_ , a_ , a_ )
for node in graph )
def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> bool:
visited.add(a_ )
rec_stk.add(a_ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a_ , a_ , a_ , a_ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(a_ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
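# A clean standalone version of the same back-edge check (identifiers above are
# obfuscated; the names and example graphs here are mine):
def has_cycle(graph: dict) -> bool:
    visited, rec_stack = set(), set()

    def dfs(node) -> bool:
        visited.add(node)
        rec_stack.add(node)
        for nxt in graph[node]:
            if nxt not in visited and dfs(nxt):
                return True
            if nxt in rec_stack:  # back edge found
                return True
        rec_stack.remove(node)
        return False

    return any(node not in visited and dfs(node) for node in graph)

print(has_cycle({0: [1], 1: [2], 2: [0]}))    # True
print(has_cycle({0: [1, 2], 1: [2], 2: []}))  # False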
| 14 | 0 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , a_ , )
class a_ ( a_ ):
'''simple docstring'''
__a: Union[str, Any] = RobertaConfig
__a: Any = '''roberta'''
def __init__( self , lowercase_ ) -> Tuple:
'''simple docstring'''
super().__init__(lowercase_ )
lowerCAmelCase_ = RobertaEmbeddings(lowercase_ )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , a_ , )
class a_ ( a_ ):
'''simple docstring'''
__a: Optional[int] = RobertaConfig
__a: Union[str, Any] = '''roberta'''
def __init__( self , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(lowercase_ )
lowerCAmelCase_ = config.num_labels
lowerCAmelCase_ = config.num_hidden_layers
lowerCAmelCase_ = DeeRobertaModel(lowercase_ )
lowerCAmelCase_ = nn.Dropout(config.hidden_dropout_prob )
lowerCAmelCase_ = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(lowercase_ )
def _lowercase ( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=-1 , lowercase_=False , ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = self.num_layers
try:
lowerCAmelCase_ = self.roberta(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , position_ids=lowercase_ , head_mask=lowercase_ , inputs_embeds=lowercase_ , )
lowerCAmelCase_ = outputs[1]
lowerCAmelCase_ = self.dropout(lowercase_ )
lowerCAmelCase_ = self.classifier(lowercase_ )
lowerCAmelCase_ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowerCAmelCase_ = e.message
lowerCAmelCase_ = e.exit_layer
lowerCAmelCase_ = outputs[0]
if not self.training:
lowerCAmelCase_ = entropy(lowercase_ )
lowerCAmelCase_ = []
lowerCAmelCase_ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase_ = MSELoss()
lowerCAmelCase_ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase_ = CrossEntropyLoss()
lowerCAmelCase_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
lowerCAmelCase_ = []
for highway_exit in outputs[-1]:
lowerCAmelCase_ = highway_exit[0]
if not self.training:
highway_logits_all.append(lowercase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowerCAmelCase_ = MSELoss()
lowerCAmelCase_ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCAmelCase_ = CrossEntropyLoss()
lowerCAmelCase_ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(lowercase_ )
if train_highway:
lowerCAmelCase_ = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
lowerCAmelCase_ = (loss,) + outputs
if not self.training:
lowerCAmelCase_ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowerCAmelCase_ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
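# `entropy` is imported from the companion DeeBERT module; a plausible sketch
# (assumed) is the Shannon entropy of the softmax over logits, used above as the
# confidence signal for early exit:
import torch

def entropy(logits: torch.Tensor) -> torch.Tensor:
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)

print(entropy(torch.tensor([[4.0, 0.0, -2.0]])))  # low entropy -> exit early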
| 364 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( a_ , a_ , a_ , unittest.TestCase ):
'''simple docstring'''
__a: int = StableDiffusionInpaintPipeline
__a: int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__a: Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__a: int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__a: List[str] = frozenset([] )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowercase_ , )
lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=lowercase_ )
torch.manual_seed(0 )
lowerCAmelCase_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
lowerCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
lowerCAmelCase_ = CLIPTextModel(lowercase_ )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _lowercase ( self , lowercase_ , lowercase_=0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ = Image.fromarray(np.uinta(lowercase_ ) ).convert('RGB' ).resize((6_4, 6_4) )
lowerCAmelCase_ = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((6_4, 6_4) )
if str(lowercase_ ).startswith('mps' ):
lowerCAmelCase_ = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = StableDiffusionInpaintPipeline(**lowercase_ )
lowerCAmelCase_ = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase_ = self.get_dummy_inputs(lowercase_ )
lowerCAmelCase_ = sd_pipe(**lowercase_ ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCAmelCase_ = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(lowercase_ , safety_checker=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , )
lowerCAmelCase_ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(
lowercase_ , torch_dtype=torch.floataa , safety_checker=lowercase_ , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , )
lowerCAmelCase_ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase_ = PNDMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' )
lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(
lowercase_ , safety_checker=lowercase_ , scheduler=lowercase_ , torch_dtype=torch.floataa , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
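# A hedged recipe distilled from the memory test above for constrained GPUs
# (checkpoint name taken from the test; savings figures are not guaranteed):
import torch
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
)
pipe.enable_attention_slicing(1)      # compute attention in single-head slices
pipe.enable_sequential_cpu_offload()  # move submodules to GPU only when needed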
| 14 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_input_mask
lowerCAmelCase_ = use_token_type_ids
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = num_choices
lowerCAmelCase_ = scope
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ = None
if self.use_input_mask:
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
lowerCAmelCase_ = DistilBertModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , lowercase_ )
lowerCAmelCase_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = DistilBertForMaskedLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = DistilBertForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(
lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = DistilBertForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = DistilBertForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = self.num_choices
lowerCAmelCase_ = DistilBertForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ = model(
lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = self.prepare_config_and_inputs()
((lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_)) = config_and_inputs
lowerCAmelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( a_ , a_ , unittest.TestCase ):
'''simple docstring'''
__a: List[Any] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
__a: Tuple = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__a: str = True
__a: Optional[int] = True
__a: Optional[int] = True
__a: Tuple = True
def _lowercase ( self ) -> int:
'''simple docstring'''
lowerCAmelCase_ = DistilBertModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=lowercase_ , dim=3_7 )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*lowercase_ )
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*lowercase_ )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*lowercase_ )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowercase_ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*lowercase_ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowercase_ )
@slow
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = DistilBertModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@slow
@require_torch_gpu
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(config=lowercase_ )
lowerCAmelCase_ = self._prepare_for_class(lowercase_ , lowercase_ )
lowerCAmelCase_ = torch.jit.trace(
lowercase_ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase_ , os.path.join(lowercase_ , 'traced_model.pt' ) )
lowerCAmelCase_ = torch.jit.load(os.path.join(lowercase_ , 'traced_model.pt' ) , map_location=lowercase_ )
loaded(inputs_dict['input_ids'].to(lowercase_ ) , inputs_dict['attention_mask'].to(lowercase_ ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self ) -> int:
'''simple docstring'''
lowerCAmelCase_ = DistilBertModel.from_pretrained('distilbert-base-uncased' )
lowerCAmelCase_ = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowerCAmelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )[0]
lowerCAmelCase_ = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , lowercase_ )
lowerCAmelCase_ = torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowercase_ , atol=1e-4 ) )
| 365 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class a_ :
'''simple docstring'''
__a: int
__a: int
class a_ :
'''simple docstring'''
def __init__( self , lowercase_ ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = [[] for _ in range(lowercase_ )]
lowerCAmelCase_ = size
def __getitem__( self , lowercase_ ) -> Iterator[Edge]:
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
return self._size
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(lowercase_ , lowercase_ ) )
def _lowercase ( self , lowercase_ , lowercase_ ) -> int | None:
'''simple docstring'''
lowerCAmelCase_ = deque([start_vertex] )
lowerCAmelCase_ = [None] * self.size
lowerCAmelCase_ = 0
while queue:
lowerCAmelCase_ = queue.popleft()
lowerCAmelCase_ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowerCAmelCase_ = current_distance + edge.weight
lowerCAmelCase_ = distances[edge.destination_vertex]
if (
isinstance(lowercase_ , lowercase_ )
and new_distance >= dest_vertex_distance
):
continue
lowerCAmelCase_ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
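# A compact standalone sketch of the same 0-1 BFS (the class above has obfuscated
# names): 0-weight edges go to the front of the deque, 1-weight edges to the back,
# giving shortest paths in O(V + E).
from collections import deque

def zero_one_bfs(adj, start):
    dist = [None] * len(adj)
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            nd = dist[u] + w
            if dist[v] is None or nd < dist[v]:
                dist[v] = nd
                if w == 0:
                    dq.appendleft(v)
                else:
                    dq.append(v)
    return dist

print(zero_one_bfs([[(1, 0), (2, 1)], [(2, 1)], []], 0))  # [0, 0, 1]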
| 14 | 0 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCamelCase ( a_ ) -> int:
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(a_ ):
return ext
raise Exception(
F'''Unable to determine file format from file extension {path}. '''
F'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' )
def lowerCamelCase ( a_ ) -> Any:
lowerCAmelCase_ = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
lowerCAmelCase_ = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format
lowerCAmelCase_ = PipelineDataFormat.from_str(
format=a_ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(a_ , a_ )
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
lowerCAmelCase_ = nlp
lowerCAmelCase_ = reader
@staticmethod
def _lowercase ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = parser.add_parser('run' , help='Run a pipeline through the CLI' )
run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run' )
run_parser.add_argument('--input' , type=lowercase_ , help='Path to the file to use for inference' )
run_parser.add_argument('--output' , type=lowercase_ , help='Path to the file that will be used post to write results.' )
run_parser.add_argument('--model' , type=lowercase_ , help='Name or path to the model to instantiate.' )
run_parser.add_argument('--config' , type=lowercase_ , help='Name or path to the model\'s config to instantiate.' )
run_parser.add_argument(
'--tokenizer' , type=lowercase_ , help='Name of the tokenizer to use. (default: same as the model name)' )
run_parser.add_argument(
'--column' , type=lowercase_ , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , )
run_parser.add_argument(
'--format' , type=lowercase_ , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , )
run_parser.add_argument(
'--device' , type=lowercase_ , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.' )
run_parser.set_defaults(func=lowercase_ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self._nlp, []
for entry in self._reader:
lowerCAmelCase_ = nlp(**lowercase_ ) if self._reader.is_multi_columns else nlp(lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
outputs.append(lowercase_ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
lowerCAmelCase_ = self._reader.save_binary(lowercase_ )
logger.warning(f'''Current pipeline requires output to be in binary format, saving at {binary_path}''' )
else:
self._reader.save(lowercase_ )
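# A standalone sketch of the extension inference above; to my knowledge the
# supported formats in transformers' PipelineDataFormat are json, csv and pipe:
SUPPORTED_FORMATS = ["json", "csv", "pipe"]

def infer_format(path: str) -> str:
    if not path:
        return "pipe"  # no file given -> read from stdin
    for ext in SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise ValueError(f"Unable to determine file format from {path!r}")

print(infer_format("data/input.csv"))  # csv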
| 366 |
from __future__ import annotations
lowerCamelCase_ = 1_0
def lowerCamelCase ( a_ ) -> list[int]:
lowerCAmelCase_ = 1
lowerCAmelCase_ = max(a_ )
while placement <= max_digit:
# declare and initialize empty buckets
lowerCAmelCase_ = [[] for _ in range(a_ )]
# split list_of_ints between the buckets
for i in list_of_ints:
lowerCAmelCase_ = int((i / placement) % RADIX )
buckets[tmp].append(a_ )
# put each buckets' contents into list_of_ints
lowerCAmelCase_ = 0
for b in range(a_ ):
for i in buckets[b]:
lowerCAmelCase_ = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
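# A clean standalone version of the same LSD radix sort (names are mine); note
# it assumes non-negative integers, since digits are taken with `% RADIX`:
def radix_sort(nums: list, radix: int = 10) -> list:
    placement = 1
    while nums and placement <= max(nums):
        buckets = [[] for _ in range(radix)]
        for n in nums:
            buckets[(n // placement) % radix].append(n)
        nums = [n for bucket in buckets for n in bucket]
        placement *= radix
    return nums

print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))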
| 14 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a_ ( a_ ):
'''simple docstring'''
__a: Optional[Any] = ['''image_processor''', '''tokenizer''']
__a: List[str] = '''LayoutLMv3ImageProcessor'''
__a: str = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''')
def __init__( self , lowercase_=None , lowercase_=None , **lowercase_ ) -> int:
'''simple docstring'''
lowerCAmelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowercase_ , )
lowerCAmelCase_ = kwargs.pop('feature_extractor' )
lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowercase_ , lowercase_ )
def __call__( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = True , lowercase_ = None , **lowercase_ , ) -> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
# first, apply the image processor
lowerCAmelCase_ = self.image_processor(images=lowercase_ , return_tensors=lowercase_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase_ = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCAmelCase_ = features['words']
lowerCAmelCase_ = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# add pixel values
lowerCAmelCase_ = features.pop('pixel_values' )
if return_overflowing_tokens is True:
lowerCAmelCase_ = self.get_overflowing_images(lowercase_ , encoded_inputs['overflow_to_sample_mapping'] )
lowerCAmelCase_ = images
return encoded_inputs
def _lowercase ( self , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f''' {len(lowercase_ )} and {len(lowercase_ )}''' )
return images_with_overflow
def _lowercase ( self , *lowercase_ , **lowercase_ ) -> List[str]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def _lowercase ( self , *lowercase_ , **lowercase_ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def _lowercase ( self ) -> Dict:
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _lowercase ( self ) -> str:
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase_ , )
return self.image_processor_class
@property
def _lowercase ( self ) -> int:
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase_ , )
return self.image_processor
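# Hedged usage sketch ("document.png" is an assumed file; the checkpoint is the
# public microsoft/layoutlmv3-base): with apply_ocr left at its default, the
# image processor supplies words and boxes itself, so only the image is passed.
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
print(encoding.keys())  # input_ids, bbox, attention_mask, pixel_values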
| 367 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def lowerCamelCase ( a_ , a_ , a_ , a_ , a_ ) -> List[Any]:
# load base model
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
lowerCAmelCase_ = load_file(a_ )
lowerCAmelCase_ = []
# directly update weight in diffusers model
for key in state_dict:
        # it helps to print the key; it usually looks like the example below:
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # alpha has already been applied, so these entries can be skipped
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowerCAmelCase_ = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' )
lowerCAmelCase_ = pipeline.text_encoder
else:
lowerCAmelCase_ = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' )
lowerCAmelCase_ = pipeline.unet
# find the target layer
lowerCAmelCase_ = layer_infos.pop(0 )
while len(a_ ) > -1:
try:
lowerCAmelCase_ = curr_layer.__getattr__(a_ )
if len(a_ ) > 0:
lowerCAmelCase_ = layer_infos.pop(0 )
elif len(a_ ) == 0:
break
except Exception:
if len(a_ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowerCAmelCase_ = layer_infos.pop(0 )
lowerCAmelCase_ = []
if "lora_down" in key:
pair_keys.append(key.replace('lora_down' , 'lora_up' ) )
pair_keys.append(a_ )
else:
pair_keys.append(a_ )
pair_keys.append(key.replace('lora_up' , 'lora_down' ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
lowerCAmelCase_ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
lowerCAmelCase_ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(a_ , a_ ).unsqueeze(2 ).unsqueeze(3 )
else:
lowerCAmelCase_ = state_dict[pair_keys[0]].to(torch.floataa )
lowerCAmelCase_ = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(a_ , a_ )
# update visited list
for item in pair_keys:
visited.append(a_ )
return pipeline
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = args.base_model_path
lowerCamelCase_ = args.checkpoint_path
lowerCamelCase_ = args.dump_path
lowerCamelCase_ = args.lora_prefix_unet
lowerCamelCase_ = args.lora_prefix_text_encoder
lowerCamelCase_ = args.alpha
lowerCamelCase_ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
lowerCamelCase_ = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
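# The core merge performed above, W <- W0 + alpha * (up @ down), in isolation
# (dimensions below are illustrative):
import torch

rank, d_in, d_out, alpha = 4, 16, 32, 0.75
w0 = torch.randn(d_out, d_in)
up, down = torch.randn(d_out, rank), torch.randn(rank, d_in)
w_merged = w0 + alpha * (up @ down)  # fold the low-rank update into the weight
print(w_merged.shape)  # torch.Size([32, 16])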
| 14 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowerCamelCase_ = """CompVis/stable-diffusion-v1-1"""
lowerCamelCase_ = """CompVis/stable-diffusion-v1-2"""
lowerCamelCase_ = """CompVis/stable-diffusion-v1-3"""
lowerCamelCase_ = """CompVis/stable-diffusion-v1-4"""
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = True , ) -> Optional[int]:
'''simple docstring'''
        super().__init__()
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(lowercase_ )
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(lowercase_ )
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(lowercase_ )
lowerCAmelCase_ = StableDiffusionPipeline(
vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , requires_safety_checker=lowercase_ , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def _lowercase ( self ) -> Dict[str, Any]:
'''simple docstring'''
return {k: getattr(self , lowercase_ ) for k in self.config.keys() if not k.startswith('_' )}
def _lowercase ( self , lowercase_ = "auto" ) -> Optional[int]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCAmelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_ )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
self.enable_attention_slicing(lowercase_ )
@torch.no_grad()
def _lowercase ( self , lowercase_ , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> Union[str, Any]:
'''simple docstring'''
return self.pipea(
prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , )
@torch.no_grad()
def _lowercase ( self , lowercase_ , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]:
'''simple docstring'''
return self.pipea(
prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , )
@torch.no_grad()
def _lowercase ( self , lowercase_ , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> Optional[Any]:
'''simple docstring'''
return self.pipea(
prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , )
@torch.no_grad()
def _lowercase ( self , lowercase_ , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> Any:
'''simple docstring'''
return self.pipea(
prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , )
@torch.no_grad()
def _lowercase ( self , lowercase_ , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = 'cuda' if torch.cuda.is_available() else 'cpu'
self.to(lowercase_ )
        # Check that the height and width are divisible by 8
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
lowerCAmelCase_ = self.textaimg_sda_a(
prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , )
# Get first result from Stable Diffusion Checkpoint v1.2
lowerCAmelCase_ = self.textaimg_sda_a(
prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , )
# Get first result from Stable Diffusion Checkpoint v1.3
lowerCAmelCase_ = self.textaimg_sda_a(
prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , )
# Get first result from Stable Diffusion Checkpoint v1.4
lowerCAmelCase_ = self.textaimg_sda_a(
prompt=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , **lowercase_ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 368 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowerCamelCase ( a_ ) -> Any:
lowerCAmelCase_ = tmp_path / 'file.csv'
lowerCAmelCase_ = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20\n ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> List[Any]:
lowerCAmelCase_ = tmp_path / 'malformed_file.csv'
lowerCAmelCase_ = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20,\n ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ , a_ ) -> List[str]:
lowerCAmelCase_ = tmp_path / 'csv_with_image.csv'
lowerCAmelCase_ = textwrap.dedent(
F'''\
image
{image_file}
''' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> int:
lowerCAmelCase_ = tmp_path / 'csv_with_label.csv'
lowerCAmelCase_ = textwrap.dedent(
        'label\ngood\nbad\ngood\n' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> Union[str, Any]:
lowerCAmelCase_ = tmp_path / 'csv_with_int_list.csv'
lowerCAmelCase_ = textwrap.dedent(
        'int_list\n1 2 3\n4 5 6\n7 8 9\n' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
def lowerCamelCase ( a_ , a_ , a_ ) -> Optional[Any]:
lowerCAmelCase_ = Csv()
lowerCAmelCase_ = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(a_ ) in record.message
for record in caplog.records )
@require_pil
def lowerCamelCase ( a_ ) -> Optional[Any]:
with open(a_ , encoding='utf-8' ) as f:
lowerCAmelCase_ = f.read().splitlines()[1]
lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_image]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
lowerCAmelCase_ = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def lowerCamelCase ( a_ ) -> int:
with open(a_ , encoding='utf-8' ) as f:
lowerCAmelCase_ = f.read().splitlines()[1:]
lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_label]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
lowerCAmelCase_ = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad'] ).str2int(label) for label in labels]
def lowerCamelCase ( a_ ) -> Union[str, Any]:
    lowerCAmelCase_ = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda x : [int(i) for i in x.split()]} )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_int_list]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
lowerCAmelCase_ = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
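# Illustrative sketch (not part of the original test file): the converters
# test above relies on the `converters=` hook that pandas' `read_csv` exposes
# (the Csv builder accepts the same keyword). A minimal standalone
# demonstration, assuming pandas is installed:
import io

import pandas as pd_sketch  # alias chosen here only to avoid clashing with the imports above

frame_sketch = pd_sketch.read_csv(
    io.StringIO('int_list\n1 2 3\n4 5 6\n'),
    converters={'int_list': lambda x: [int(i) for i in x.split()]},
)
assert frame_sketch['int_list'].tolist() == [[1, 2, 3], [4, 5, 6]]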
| 14 | 0 |
import operator as op
def lowerCamelCase ( a_ ) -> List[Any]:
lowerCAmelCase_ = []
lowerCAmelCase_ = lambda a_ , a_ : int(x / y ) # noqa: E731 integer division operation
lowerCAmelCase_ = {
'^': op.pow,
'*': op.mul,
'/': div,
'+': op.add,
'-': op.sub,
} # operators & their respective operation
# print table header
print('Symbol'.center(8 ) , 'Action'.center(12 ) , 'Stack' , sep=' | ' )
print('-' * (30 + len(a_ )) )
for x in post_fix:
        if x.isdigit(): # if x is a digit (an operand)
stack.append(a_ ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('push(' + x + ')').ljust(12 ) , ','.join(a_ ) , sep=' | ' )
else:
lowerCAmelCase_ = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + b + ')').ljust(12 ) , ','.join(a_ ) , sep=' | ' )
lowerCAmelCase_ = stack.pop() # pop stack
# output in tabular format
print(''.rjust(8 ) , ('pop(' + a + ')').ljust(12 ) , ','.join(a_ ) , sep=' | ' )
stack.append(
str(opr[x](int(a_ ) , int(a_ ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('push(' + a + x + b + ')').ljust(12 ) , ','.join(a_ ) , sep=' | ' , )
return int(stack[0] )
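# Illustrative sketch (not part of the original file): the same evaluation
# without the tracing table. Scan tokens left to right, push operands, and on
# an operator pop twice -- the first pop yields the *right* operand. Assumes
# non-negative integer operands.
def _postfix_eval_sketch(tokens):
    ops = {
        '^': lambda a, b: a**b,
        '*': lambda a, b: a * b,
        '/': lambda a, b: int(a / b),
        '+': lambda a, b: a + b,
        '-': lambda a, b: a - b,
    }
    stack = []
    for tok in tokens:
        if tok.isdigit():
            stack.append(int(tok))
        else:
            b, a = stack.pop(), stack.pop()  # b was pushed last
            stack.append(ops[tok](a, b))
    return stack[0]


assert _postfix_eval_sketch('5 6 9 * +'.split()) == 59  # 5 + (6 * 9)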
if __name__ == "__main__":
lowerCamelCase_ = input("""\n\nEnter a Postfix Equation (space separated) = """).split(""" """)
print("""\n\tResult = """, solve(Postfix)) | 369 |
from maths.prime_factors import prime_factors
def lowerCamelCase ( a_ ) -> int:
if not isinstance(a_ , a_ ):
lowerCAmelCase_ = F'''Input value of [number={number}] must be an integer'''
raise TypeError(a_ )
if number < 1:
raise ValueError('Input must be a positive integer' )
return -1 if len(prime_factors(a_ ) ) % 2 else 1
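# Illustrative sketch (not part of the original file): the function above is
# the Liouville function lambda(n), which is -1 when n has an odd number of
# prime factors counted with multiplicity and +1 otherwise. A dependency-free
# cross-check using trial division:
def _liouville_sketch(n):
    count, d = 0, 2
    while d * d <= n:
        while n % d == 0:
            n //= d
            count += 1
        d += 1
    if n > 1:  # whatever remains is itself prime
        count += 1
    return -1 if count % 2 else 1


assert [_liouville_sketch(n) for n in (1, 2, 3, 4, 10)] == [1, -1, -1, 1, 1]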
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def lowerCamelCase ( ) -> tuple[list[int], int]:
lowerCAmelCase_ = [randint(-1_000 , 1_000 ) for i in range(10 )]
lowerCAmelCase_ = randint(-5_000 , 5_000 )
return (arr, r)
lowerCamelCase_ = make_dataset()
def lowerCamelCase ( a_ , a_ ) -> tuple[int, ...]:
for triplet in permutations(a_ , 3 ):
if sum(a_ ) == target:
return tuple(sorted(a_ ) )
return (0, 0, 0)
def lowerCamelCase ( a_ , a_ ) -> tuple[int, int, int]:
arr.sort()
lowerCAmelCase_ = len(a_ )
for i in range(n - 1 ):
lowerCAmelCase_ , lowerCAmelCase_ = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
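# Illustrative sketch (not part of the original file): why the function above
# is O(n^2) overall -- after sorting, each outer index performs one inward
# two-pointer sweep, moving `left` or `right` depending on how the current
# sum compares to the target.
def _two_pointer_sketch(arr, target):
    arr = sorted(arr)
    for i in range(len(arr) - 2):
        left, right = i + 1, len(arr) - 1
        while left < right:
            s = arr[i] + arr[left] + arr[right]
            if s == target:
                return (arr[i], arr[left], arr[right])
            if s < target:
                left += 1  # need a bigger sum
            else:
                right -= 1  # need a smaller sum
    return (0, 0, 0)


assert _two_pointer_sketch([2, 7, 11, 15, -1], 21) == (-1, 7, 15)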
def lowerCamelCase ( ) -> tuple[float, float]:
lowerCAmelCase_ = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
lowerCAmelCase_ = '\ntriplet_sum1(*dataset)\n'
lowerCAmelCase_ = '\ntriplet_sum2(*dataset)\n'
lowerCAmelCase_ = repeat(setup=a_ , stmt=a_ , repeat=5 , number=10_000 )
lowerCAmelCase_ = repeat(setup=a_ , stmt=a_ , repeat=5 , number=10_000 )
return (min(a_ ), min(a_ ))
if __name__ == "__main__":
from doctest import testmod
testmod()
lowerCamelCase_ = solution_times()
print(f'''The time for naive implementation is {times[0]}.''')
print(f'''The time for optimized implementation is {times[1]}.''')
| 370 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCamelCase ( a_ , a_ ) -> Tuple:
lowerCAmelCase_ = XCLIPTextConfig()
# derive patch size from model name
lowerCAmelCase_ = model_name.find('patch' )
lowerCAmelCase_ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
lowerCAmelCase_ = XCLIPVisionConfig(patch_size=a_ , num_frames=a_ )
if "large" in model_name:
lowerCAmelCase_ = 768
lowerCAmelCase_ = 3_072
lowerCAmelCase_ = 12
lowerCAmelCase_ = 1_024
lowerCAmelCase_ = 4_096
lowerCAmelCase_ = 16
lowerCAmelCase_ = 24
lowerCAmelCase_ = 768
lowerCAmelCase_ = 3_072
if model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ = 336
lowerCAmelCase_ = XCLIPConfig.from_text_vision_configs(a_ , a_ )
if "large" in model_name:
lowerCAmelCase_ = 768
return config
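# Illustrative sketch (not part of the conversion script): the patch size is
# recovered purely from the checkpoint name -- the two characters directly
# after the literal "patch". For example:
_name_sketch = 'xclip-base-patch32-16-frames'
_idx_sketch = _name_sketch.find('patch')
assert int(_name_sketch[_idx_sketch + len('patch') : _idx_sketch + len('patch') + 2]) == 32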
def lowerCamelCase ( a_ ) -> List[str]:
# text encoder
if name == "token_embedding.weight":
lowerCAmelCase_ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
lowerCAmelCase_ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
lowerCAmelCase_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
lowerCAmelCase_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
lowerCAmelCase_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
lowerCAmelCase_ = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
lowerCAmelCase_ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
lowerCAmelCase_ = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
lowerCAmelCase_ = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
lowerCAmelCase_ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
lowerCAmelCase_ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
lowerCAmelCase_ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
lowerCAmelCase_ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
lowerCAmelCase_ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
lowerCAmelCase_ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
lowerCAmelCase_ = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
lowerCAmelCase_ = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
lowerCAmelCase_ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
lowerCAmelCase_ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
lowerCAmelCase_ = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
lowerCAmelCase_ = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
lowerCAmelCase_ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def lowerCamelCase ( a_ , a_ ) -> Dict:
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ = orig_state_dict.pop(a_ )
if "attn.in_proj" in key:
lowerCAmelCase_ = key.split('.' )
if key.startswith('visual' ):
lowerCAmelCase_ = key_split[3]
lowerCAmelCase_ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[
:dim
]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[
-dim:
]
else:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
elif key.startswith('mit' ):
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.vision_config.mit_hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[dim : dim * 2, :]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[dim : dim * 2]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = rename_key(a_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
lowerCAmelCase_ = val.T
lowerCAmelCase_ = val
return orig_state_dict
def lowerCamelCase ( a_ ) -> List[str]:
if num_frames == 8:
lowerCAmelCase_ = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
lowerCAmelCase_ = 'eating_spaghetti.npy'
elif num_frames == 32:
lowerCAmelCase_ = 'eating_spaghetti_32_frames.npy'
lowerCAmelCase_ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=a_ , repo_type='dataset' , )
lowerCAmelCase_ = np.load(a_ )
return list(a_ )
def lowerCamelCase ( a_ , a_=None , a_=False ) -> List[Any]:
lowerCAmelCase_ = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
lowerCAmelCase_ = model_to_url[model_name]
lowerCAmelCase_ = 8
if "16-frames" in model_name:
lowerCAmelCase_ = 16
elif "shot" in model_name:
lowerCAmelCase_ = 32
lowerCAmelCase_ = get_xclip_config(a_ , a_ )
lowerCAmelCase_ = XCLIPModel(a_ )
model.eval()
if "drive" in checkpoint_url:
lowerCAmelCase_ = 'pytorch_model.bin'
gdown.cached_download(a_ , a_ , quiet=a_ )
lowerCAmelCase_ = torch.load(a_ , map_location='cpu' )['model']
else:
lowerCAmelCase_ = torch.hub.load_state_dict_from_url(a_ )['model']
lowerCAmelCase_ = convert_state_dict(a_ , a_ )
lowerCAmelCase_ = XCLIPModel(a_ )
lowerCAmelCase_ , lowerCAmelCase_ = model.load_state_dict(a_ , strict=a_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
lowerCAmelCase_ = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
lowerCAmelCase_ = VideoMAEImageProcessor(size=a_ )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase_ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase_ = XCLIPProcessor(image_processor=a_ , tokenizer=a_ )
lowerCAmelCase_ = prepare_video(a_ )
lowerCAmelCase_ = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=a_ , return_tensors='pt' , padding=a_ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
lowerCAmelCase_ = model(**a_ )
# Verify outputs
lowerCAmelCase_ = outputs.logits_per_video
lowerCAmelCase_ = logits_per_video.softmax(dim=1 )
print('Probs:' , a_ )
# kinetics-400
if model_name == "xclip-base-patch32":
lowerCAmelCase_ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
lowerCAmelCase_ = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
lowerCAmelCase_ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
lowerCAmelCase_ = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
lowerCAmelCase_ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowerCAmelCase_ = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowerCAmelCase_ = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowerCAmelCase_ = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowerCAmelCase_ = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowerCAmelCase_ = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowerCAmelCase_ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowerCAmelCase_ = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowerCAmelCase_ = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(a_ , a_ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a_ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(a_ , organization='nielsr' )
processor.push_to_hub(a_ , organization='nielsr' )
slow_tokenizer.push_to_hub(a_ , organization='nielsr' )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 14 | 0 |
import colorsys
from PIL import Image # type: ignore
def lowerCamelCase ( a_ , a_ , a_ ) -> float:
lowerCAmelCase_ = x
lowerCAmelCase_ = y
for step in range(a_ ): # noqa: B007
lowerCAmelCase_ = a * a - b * b + x
lowerCAmelCase_ = 2 * a * b + y
lowerCAmelCase_ = a_new
        # divergence is guaranteed once the squared modulus a * a + b * b
        # exceeds 4, i.e. once |z| > 2
        if a * a + b * b > 4:
            break
return step / (max_step - 1)
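# Illustrative sketch (not part of the original file): the escape-time idea
# behind the function above -- iterate z -> z^2 + c and count steps until the
# squared modulus exceeds 4. Interior points (e.g. c = 0) never escape, while
# exterior points (e.g. c = 1) escape within a few steps.
def _escape_steps_sketch(cx, cy, max_step=50):
    a = b = 0.0
    for step in range(max_step):
        a, b = a * a - b * b + cx, 2 * a * b + cy
        if a * a + b * b > 4:
            return step
    return max_step


assert _escape_steps_sketch(0.0, 0.0) == 50  # in the set: never escapes
assert _escape_steps_sketch(1.0, 0.0) < 5  # outside the set: escapes fast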
def lowerCamelCase ( a_ ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def lowerCamelCase ( a_ ) -> tuple:
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(a_ , 1 , 1 ) )
def lowerCamelCase ( a_ = 800 , a_ = 600 , a_ = -0.6 , a_ = 0 , a_ = 3.2 , a_ = 50 , a_ = True , ) -> Image.Image:
lowerCAmelCase_ = Image.new('RGB' , (image_width, image_height) )
lowerCAmelCase_ = img.load()
# loop through the image-coordinates
for image_x in range(a_ ):
for image_y in range(a_ ):
# determine the figure-coordinates based on the image-coordinates
lowerCAmelCase_ = figure_width / image_width * image_height
lowerCAmelCase_ = figure_center_x + (image_x / image_width - 0.5) * figure_width
lowerCAmelCase_ = figure_center_y + (image_y / image_height - 0.5) * figure_height
lowerCAmelCase_ = get_distance(a_ , a_ , a_ )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
lowerCAmelCase_ = get_color_coded_rgb(a_ )
else:
lowerCAmelCase_ = get_black_and_white_rgb(a_ )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
lowerCamelCase_ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 371 |
def lowerCamelCase ( a_ , a_ ) -> List[Any]:
lowerCAmelCase_ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowerCamelCase ( a_ , a_ , a_ ) -> Union[str, Any]:
lowerCAmelCase_ = 0
while b > 0:
if b & 1:
lowerCAmelCase_ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
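# Illustrative sketch (not part of the original file): both functions above
# are shift-and-add ("Russian peasant") multiplication -- b is consumed bit by
# bit while a doubles each round; the modular variant just reduces the
# accumulator mod c on every addition.
def _peasant_multiply_sketch(a, b):
    res = 0
    while b > 0:
        if b & 1:  # lowest bit of b set -> add the current a
            res += a
        a += a  # double a (shift one bit left)
        b >>= 1  # drop the processed bit
    return res


assert _peasant_multiply_sketch(13, 11) == 143
assert _peasant_multiply_sketch(7, 8) % 5 == (7 * 8) % 5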
| 14 | 0 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
lowerCamelCase_ = ["""gpt2"""]
lowerCamelCase_ = """gpt2"""
if is_tf_available():
class a_ ( tf.Module ):
'''simple docstring'''
def __init__( self , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ = tokenizer
lowerCAmelCase_ = AutoConfig.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFGPTaLMHeadModel.from_config(lowercase_ )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def _lowercase ( self , lowercase_ ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = self.tokenizer(lowercase_ )
lowerCAmelCase_ = tokenized['input_ids'].to_tensor()
lowerCAmelCase_ = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
lowerCAmelCase_ = self.model(input_ids=lowercase_ , attention_mask=lowercase_ )['logits']
return outputs
@require_tf
@require_keras_nlp
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> int:
'''simple docstring'''
super().setUp()
lowerCAmelCase_ = [GPTaTokenizer.from_pretrained(lowercase_ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
lowerCAmelCase_ = [TFGPTaTokenizer.from_pretrained(lowercase_ ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowerCAmelCase_ = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
lowerCAmelCase_ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
lowerCAmelCase_ = tokenizer([test_inputs] , return_tensors='tf' )
lowerCAmelCase_ = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
lowerCAmelCase_ = python_outputs[key].numpy()
lowerCAmelCase_ = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(lowercase_ , tf.intaa ) == tf_outputs_values ) )
@slow
def _lowercase ( self ) -> Any:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase_ = tf.function(lowercase_ )
for test_inputs in self.test_sentences:
lowerCAmelCase_ = tf.constant(lowercase_ )
lowerCAmelCase_ = compiled_tokenizer(lowercase_ )
lowerCAmelCase_ = tf_tokenizer(lowercase_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _lowercase ( self ) -> int:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase_ = ModelToSave(tokenizer=lowercase_ )
lowerCAmelCase_ = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCAmelCase_ = model.serving(lowercase_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowerCAmelCase_ = Path(lowercase_ ) / 'saved.model'
tf.saved_model.save(lowercase_ , lowercase_ , signatures={'serving_default': model.serving} )
lowerCAmelCase_ = tf.saved_model.load(lowercase_ )
lowerCAmelCase_ = loaded_model.signatures['serving_default'](lowercase_ )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCAmelCase_ = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCAmelCase_ = tf_tokenizer(lowercase_ ) # Build model with some sample inputs
lowerCAmelCase_ = tf_tokenizer.get_config()
lowerCAmelCase_ = TFGPTaTokenizer.from_config(lowercase_ )
lowerCAmelCase_ = model_from_config(lowercase_ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
            # assign an arbitrary pad_token_id so the tokenizer can pad/truncate to max_length
lowerCAmelCase_ = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
lowerCAmelCase_ = tf.convert_to_tensor([self.test_sentences[0]] )
lowerCAmelCase_ = tf_tokenizer(lowercase_ , max_length=lowercase_ )
lowerCAmelCase_ = out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 350 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class a_ ( a_ ):
'''simple docstring'''
__a: str = ['''vqvae''']
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ , mel=lowercase_ , vqvae=lowercase_ )
def _lowercase ( self ) -> int:
'''simple docstring'''
return 5_0 if isinstance(self.scheduler , lowercase_ ) else 1_0_0_0
@torch.no_grad()
def __call__( self , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
lowerCAmelCase_ = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowercase_ )
lowerCAmelCase_ = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowerCAmelCase_ = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowerCAmelCase_ = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=lowercase_ , device=self.device , )
lowerCAmelCase_ = noise
lowerCAmelCase_ = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowercase_ , lowercase_ )
lowerCAmelCase_ = self.mel.audio_slice_to_image(lowercase_ )
lowerCAmelCase_ = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
lowerCAmelCase_ = (input_image / 2_5_5) * 2 - 1
lowerCAmelCase_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowerCAmelCase_ = self.vqvae.encode(torch.unsqueeze(lowercase_ , 0 ) ).latent_dist.sample(
generator=lowercase_ )[0]
lowerCAmelCase_ = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , self.scheduler.timesteps[start_step - 1] )
lowerCAmelCase_ = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowerCAmelCase_ = int(mask_start_secs * pixels_per_second )
lowerCAmelCase_ = int(mask_end_secs * pixels_per_second )
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , lowercase_ ):
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ , lowercase_ )['sample']
else:
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample']
if isinstance(self.scheduler , lowercase_ ):
lowerCAmelCase_ = self.scheduler.step(
model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , eta=lowercase_ , generator=lowercase_ , )['prev_sample']
else:
lowerCAmelCase_ = self.scheduler.step(
model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , generator=lowercase_ , )['prev_sample']
if mask is not None:
if mask_start > 0:
lowerCAmelCase_ = mask[:, step, :, :mask_start]
if mask_end > 0:
lowerCAmelCase_ = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
lowerCAmelCase_ = 1 / self.vqvae.config.scaling_factor * images
lowerCAmelCase_ = self.vqvae.decode(lowercase_ )['sample']
lowerCAmelCase_ = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowerCAmelCase_ = (images * 2_5_5).round().astype('uint8' )
lowerCAmelCase_ = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='RGB' ).convert('L' ) for _ in images) )
lowerCAmelCase_ = [self.mel.image_to_audio(lowercase_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowercase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase_ ) )
@torch.no_grad()
def _lowercase ( self , lowercase_ , lowercase_ = 5_0 ) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , lowercase_ )
self.scheduler.set_timesteps(lowercase_ )
lowerCAmelCase_ = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
lowerCAmelCase_ = (sample / 2_5_5) * 2 - 1
lowerCAmelCase_ = torch.Tensor(lowercase_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
lowerCAmelCase_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowerCAmelCase_ = self.scheduler.alphas_cumprod[t]
lowerCAmelCase_ = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowerCAmelCase_ = 1 - alpha_prod_t
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample']
lowerCAmelCase_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowerCAmelCase_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowerCAmelCase_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _lowercase ( lowercase_ , lowercase_ , lowercase_ ) -> torch.Tensor:
'''simple docstring'''
lowerCAmelCase_ = acos(torch.dot(torch.flatten(lowercase_ ) , torch.flatten(lowercase_ ) ) / torch.norm(lowercase_ ) / torch.norm(lowercase_ ) )
return sin((1 - alpha) * theta ) * xa / sin(lowercase_ ) + sin(alpha * theta ) * xa / sin(lowercase_ )
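# Illustrative sketch (not part of the original pipeline): the static method
# above is spherical linear interpolation (slerp). With theta the angle
# between the flattened tensors, the output is
# sin((1 - alpha) * theta) / sin(theta) * x0 + sin(alpha * theta) / sin(theta) * x1,
# so unit vectors stay on the unit sphere. Standalone check, reusing the
# `acos`, `sin` and `torch` imports from the top of this file:
def _slerp_sketch(alpha, xa, xb):
    theta = acos(torch.dot(torch.flatten(xa), torch.flatten(xb)) / torch.norm(xa) / torch.norm(xb))
    return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)


_mid_sketch = _slerp_sketch(0.5, torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0]))
assert torch.allclose(torch.norm(_mid_sketch), torch.tensor(1.0))  # stays on the unit circle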
| 14 | 0 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowerCamelCase_ = logging.getLogger(__name__)
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , lowercase_=-1 ) -> int:
'''simple docstring'''
lowerCAmelCase_ = label_idx
def _lowercase ( self , lowercase_ , lowercase_ ) -> List[InputExample]:
'''simple docstring'''
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase_ = mode.value
lowerCAmelCase_ = os.path.join(lowercase_ , f'''{mode}.txt''' )
lowerCAmelCase_ = 1
lowerCAmelCase_ = []
with open(lowercase_ , encoding='utf-8' ) as f:
lowerCAmelCase_ = []
lowerCAmelCase_ = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowercase_ , labels=lowercase_ ) )
guid_index += 1
lowerCAmelCase_ = []
lowerCAmelCase_ = []
else:
lowerCAmelCase_ = line.split(' ' )
words.append(splits[0] )
if len(lowercase_ ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowercase_ , labels=lowercase_ ) )
return examples
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
lowerCAmelCase_ = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(lowercase_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowerCAmelCase_ = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(lowercase_ )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def _lowercase ( self , lowercase_ ) -> List[str]:
'''simple docstring'''
if path:
with open(lowercase_ , 'r' ) as f:
lowerCAmelCase_ = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase_ = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class a_ ( a_ ):
'''simple docstring'''
def __init__( self ) -> int:
'''simple docstring'''
super().__init__(label_idx=-2 )
def _lowercase ( self , lowercase_ ) -> List[str]:
'''simple docstring'''
if path:
with open(lowercase_ , 'r' ) as f:
lowerCAmelCase_ = f.read().splitlines()
if "O" not in labels:
lowerCAmelCase_ = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class a_ ( a_ ):
'''simple docstring'''
def _lowercase ( self , lowercase_ , lowercase_ ) -> List[InputExample]:
'''simple docstring'''
if isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase_ = mode.value
lowerCAmelCase_ = os.path.join(lowercase_ , f'''{mode}.txt''' )
lowerCAmelCase_ = 1
lowerCAmelCase_ = []
with open(lowercase_ , encoding='utf-8' ) as f:
for sentence in parse_incr(lowercase_ ):
lowerCAmelCase_ = []
lowerCAmelCase_ = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(lowercase_ ) == len(lowercase_ )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowercase_ , labels=lowercase_ ) )
guid_index += 1
return examples
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = 0
for sentence in parse_incr(lowercase_ ):
lowerCAmelCase_ = preds_list[example_id]
lowerCAmelCase_ = ''
for token in sentence:
out += f'''{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(lowercase_ )
example_id += 1
def _lowercase ( self , lowercase_ ) -> List[str]:
'''simple docstring'''
if path:
with open(lowercase_ , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 351 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCamelCase_ = logging.get_logger(__name__)
def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> Tuple[int, int]:
def constraint_to_multiple_of(a_ , a_ , a_=0 , a_=None ):
lowerCAmelCase_ = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
lowerCAmelCase_ = math.floor(val / multiple ) * multiple
if x < min_val:
lowerCAmelCase_ = math.ceil(val / multiple ) * multiple
return x
lowerCAmelCase_ = (output_size, output_size) if isinstance(a_ , a_ ) else output_size
lowerCAmelCase_ , lowerCAmelCase_ = get_image_size(a_ )
lowerCAmelCase_ , lowerCAmelCase_ = output_size
# determine new height and width
lowerCAmelCase_ = output_height / input_height
lowerCAmelCase_ = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
lowerCAmelCase_ = scale_width
else:
# fit height
lowerCAmelCase_ = scale_height
lowerCAmelCase_ = constraint_to_multiple_of(scale_height * input_height , multiple=a_ )
lowerCAmelCase_ = constraint_to_multiple_of(scale_width * input_width , multiple=a_ )
return (new_height, new_width)
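# Illustrative sketch (not part of the original processor): what the nested
# `constraint_to_multiple_of` helper above does -- snap a value to the nearest
# multiple, then round down if that overshoots `max_val` and up if it
# undershoots `min_val` (reusing the module-level `math` import):
def _to_multiple_sketch(val, multiple, min_val=0, max_val=None):
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x


assert _to_multiple_sketch(250, 32) == 256  # nearest multiple of 32
assert _to_multiple_sketch(250, 32, max_val=250) == 224  # clamped downward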
class a_ ( a_ ):
'''simple docstring'''
__a: Union[str, Any] = ['''pixel_values''']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = False , lowercase_ = 1 , lowercase_ = True , lowercase_ = 1 / 2_5_5 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
lowerCAmelCase_ = get_size_dict(lowercase_ )
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = keep_aspect_ratio
lowerCAmelCase_ = ensure_multiple_of
lowerCAmelCase_ = resample
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = rescale_factor
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = False , lowercase_ = 1 , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowerCAmelCase_ = get_resize_output_image_size(
lowercase_ , output_size=(size['height'], size['width']) , keep_aspect_ratio=lowercase_ , multiple=lowercase_ , )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Dict:
'''simple docstring'''
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image:
'''simple docstring'''
lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ = size if size is not None else self.size
lowerCAmelCase_ = get_size_dict(lowercase_ )
lowerCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowerCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowerCAmelCase_ = resample if resample is not None else self.resample
lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ = image_std if image_std is not None else self.image_std
lowerCAmelCase_ = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase_ = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowerCAmelCase_ = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_rescale:
lowerCAmelCase_ = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
lowerCAmelCase_ = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
lowerCAmelCase_ = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowerCAmelCase_ = {'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(lowercase_ ):
lowerCAmelCase_ = target_sizes.numpy()
lowerCAmelCase_ = []
for idx in range(len(lowercase_ ) ):
lowerCAmelCase_ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_ )
lowerCAmelCase_ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase_ )
else:
lowerCAmelCase_ = logits.argmax(dim=1 )
lowerCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 14 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class a_ ( a_ , a_ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowercase_ = 7_6_8 , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ = nn.Parameter(torch.zeros(1 , lowercase_ ) )
lowerCAmelCase_ = nn.Parameter(torch.ones(1 , lowercase_ ) )
def _lowercase ( self , lowercase_ = None , lowercase_ = None , ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = nn.Parameter(self.mean.to(lowercase_ ).to(lowercase_ ) )
lowerCAmelCase_ = nn.Parameter(self.std.to(lowercase_ ).to(lowercase_ ) )
return self
def _lowercase ( self , lowercase_ ) -> str:
'''simple docstring'''
lowerCAmelCase_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def _lowercase ( self , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = (embeds * self.std) + self.mean
return embeds
| 352 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowerCamelCase_ = logging.get_logger(__name__)
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , *lowercase_ , **lowercase_ ) -> None:
'''simple docstring'''
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 14 | 0 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def lowerCamelCase ( a_ , a_ ) -> int:
lowerCAmelCase_ = args.log_outputs
lowerCAmelCase_ = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
lowerCAmelCase_ = load_metric('wer' )
lowerCAmelCase_ = load_metric('cer' )
# compute metrics
lowerCAmelCase_ = wer.compute(references=result['target'] , predictions=result['prediction'] )
lowerCAmelCase_ = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
lowerCAmelCase_ = F'''WER: {wer_result}\nCER: {cer_result}'''
print(a_ )
with open(F'''{dataset_id}_eval_results.txt''' , 'w' ) as f:
f.write(a_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
lowerCAmelCase_ = F'''log_{dataset_id}_predictions.txt'''
lowerCAmelCase_ = F'''log_{dataset_id}_targets.txt'''
with open(a_ , 'w' ) as p, open(a_ , 'w' ) as t:
# mapping function to write output
def write_to_file(a_ , a_ ):
p.write(F'''{i}''' + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(F'''{i}''' + '\n' )
t.write(batch['target'] + '\n' )
result.map(a_ , with_indices=a_ )
def lowerCamelCase ( a_ ) -> str:
lowerCAmelCase_ = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
lowerCAmelCase_ = re.sub(a_ , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
lowerCAmelCase_ = ['\n\n', '\n', ' ', ' ']
for t in token_sequences_to_ignore:
lowerCAmelCase_ = ' '.join(text.split(a_ ) )
return text
def lowerCamelCase ( a_ ) -> str:
# load dataset
lowerCAmelCase_ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=a_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(args.model_id )
lowerCAmelCase_ = feature_extractor.sampling_rate
# resample audio
lowerCAmelCase_ = dataset.cast_column('audio' , Audio(sampling_rate=a_ ) )
# load eval pipeline
if args.device is None:
lowerCAmelCase_ = 0 if torch.cuda.is_available() else -1
lowerCAmelCase_ = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(a_ ):
lowerCAmelCase_ = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
lowerCAmelCase_ = prediction['text']
lowerCAmelCase_ = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
lowerCAmelCase_ = dataset.map(a_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(a_ , a_ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
    parser.add_argument(
        """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds (optional)."""
    )
    parser.add_argument(
        """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks in seconds (optional)."""
    )
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
lowerCamelCase_ = parser.parse_args()
main(args)
| 353 |
from __future__ import annotations
import queue
class a_ :
'''simple docstring'''
def __init__( self , lowercase_ ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = data
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def lowerCamelCase ( ) -> TreeNode:
print('\n********Press N to stop entering at any point of time********\n' )
lowerCAmelCase_ = input('Enter the value of the root node: ' ).strip().lower()
lowerCAmelCase_ = queue.Queue()
lowerCAmelCase_ = TreeNode(int(a_ ) )
q.put(a_ )
while not q.empty():
lowerCAmelCase_ = q.get()
lowerCAmelCase_ = F'''Enter the left node of {node_found.data}: '''
lowerCAmelCase_ = input(a_ ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCAmelCase_ = TreeNode(int(a_ ) )
lowerCAmelCase_ = left_node
q.put(a_ )
lowerCAmelCase_ = F'''Enter the right node of {node_found.data}: '''
lowerCAmelCase_ = input(a_ ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCAmelCase_ = TreeNode(int(a_ ) )
lowerCAmelCase_ = right_node
q.put(a_ )
raise
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
lowerCAmelCase_ = queue.Queue()
q.put(a_ )
while not q.empty():
lowerCAmelCase_ = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
lowerCAmelCase_ = queue.Queue()
q.put(a_ )
while not q.empty():
lowerCAmelCase_ = []
while not q.empty():
lowerCAmelCase_ = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(a_ )
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
lowerCAmelCase_ = []
lowerCAmelCase_ = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(a_ )
lowerCAmelCase_ = n.left
# end of while means current node doesn't have left child
lowerCAmelCase_ = stack.pop()
# start to traverse its right child
lowerCAmelCase_ = n.right
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
lowerCAmelCase_ = []
lowerCAmelCase_ = node
while n or stack:
while n:
stack.append(a_ )
lowerCAmelCase_ = n.left
lowerCAmelCase_ = stack.pop()
print(n.data , end=',' )
lowerCAmelCase_ = n.right
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
lowerCAmelCase_ , lowerCAmelCase_ = [], []
lowerCAmelCase_ = node
stacka.append(a_ )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCAmelCase_ = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(a_ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def lowerCamelCase ( a_ = "" , a_=50 , a_="*" ) -> str:
if not s:
return "\n" + width * char
lowerCAmelCase_ , lowerCAmelCase_ = divmod(width - len(a_ ) - 2 , 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
lowerCamelCase_ = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 5_0 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 14 | 0 |
from maths.prime_factors import prime_factors
def lowerCamelCase ( a_ ):
if not isinstance(a_ , a_ ):
lowerCAmelCase_ = F'''Input value of [number={number}] must be an integer'''
raise TypeError(a_ )
if number < 1:
raise ValueError('Input must be a positive integer' )
return -1 if len(prime_factors(a_ ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354 |
import baseaa
def lowerCamelCase ( a_ ) -> bytes:
return baseaa.baaencode(string.encode('utf-8' ) )
def lowerCamelCase ( a_ ) -> str:
return baseaa.baadecode(a_ ).decode('utf-8' )
if __name__ == "__main__":
lowerCamelCase_ = """Hello World!"""
lowerCamelCase_ = baseaa_encode(test)
print(encoded)
lowerCamelCase_ = baseaa_decode(encoded)
print(decoded)
| 14 | 0 |
def lowerCamelCase ( a_ ) -> int:
assert column_title.isupper()
lowerCAmelCase_ = 0
lowerCAmelCase_ = len(a_ ) - 1
lowerCAmelCase_ = 0
while index >= 0:
lowerCAmelCase_ = (ord(column_title[index] ) - 64) * pow(26 , a_ )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 355 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def lowerCamelCase ( a_ , a_ , a_=None , a_=None ) -> int:
if attention_mask is None:
lowerCAmelCase_ = tf.cast(tf.math.not_equal(a_ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class a_ :
'''simple docstring'''
__a: Tuple = OPTConfig
__a: Optional[Any] = {}
__a: Tuple = '''gelu'''
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=9_9 , lowercase_=1_6 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=2_0 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=1_6 , lowercase_=1_6 , ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = eos_token_id
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = bos_token_id
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = word_embed_proj_dim
lowerCAmelCase_ = False
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase_ , **self.config_updates , )
lowerCAmelCase_ = prepare_opt_inputs_dict(lowercase_ , lowercase_ )
return config, inputs_dict
def _lowercase ( self , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModel(config=lowercase_ )
lowerCAmelCase_ = inputs_dict['input_ids']
lowerCAmelCase_ = input_ids[:1, :]
lowerCAmelCase_ = inputs_dict['attention_mask'][:1, :]
lowerCAmelCase_ = 1
# first forward pass
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCAmelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )[0]
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
@require_tf
class a_ ( a_ , a_ , unittest.TestCase ):
'''simple docstring'''
__a: Optional[int] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__a: Optional[Any] = (TFOPTForCausalLM,) if is_tf_available() else ()
__a: Union[str, Any] = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
__a: int = False
__a: List[Any] = False
__a: Dict = False
__a: List[Any] = 1_0
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=lowercase_ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowercase_ , lowercase_ ):
if hasattr(lowercase_ , 'weight' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowercase_ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
lowerCAmelCase_ = model_class(config=lowercase_ )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase_ )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCAmelCase_ = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowercase_ )
# check that weights remain the same after resizing
lowerCAmelCase_ = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase_ = False
self.assertTrue(lowercase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowercase_ )
lowerCAmelCase_ = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase_ = False
self.assertTrue(lowercase_ )
def lowerCamelCase ( a_ ) -> Any:
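# helper: wrap nested lists of token ids in an integer tensor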
return tf.constant(a_ , dtype=tf.intaa )
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
__a: Optional[int] = 9_9
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = tf.ones((4, 1) , dtype=tf.intaa ) * 2
lowerCAmelCase_ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowerCAmelCase_ = input_ids.shape[0]
lowerCAmelCase_ = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModel.from_pretrained('facebook/opt-350m' )
lowerCAmelCase_ = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowerCAmelCase_ = tf.not_equal(lowercase_ , model.config.pad_token_id )
with tf.GradientTape():
lowerCAmelCase_ = model(input_ids=lowercase_ , attention_mask=lowercase_ ).last_hidden_state
lowerCAmelCase_ = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , lowercase_ )
lowerCAmelCase_ = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-3 ) )
lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ )
lowerCAmelCase_ = xla_generate(lowercase_ , lowercase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-2 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
lowerCAmelCase_ = 'facebook/opt-350m'
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(self.path_model )
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ , add_special_tokens=lowercase_ )
lowerCAmelCase_ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowerCAmelCase_ = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ )
lowerCAmelCase_ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-125m'
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCAmelCase_ = []
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-350m'
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
lowerCAmelCase_ = 'left'
# use different length sentences to test batching
lowerCAmelCase_ = [
'Hello, my dog is a little',
'Today, I',
]
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ )
lowerCAmelCase_ = inputs['input_ids']
lowerCAmelCase_ = model.generate(input_ids=lowercase_ , attention_mask=inputs['attention_mask'] )
lowerCAmelCase_ = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(input_ids=lowercase_ )
lowerCAmelCase_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
lowerCAmelCase_ = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-350m'
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCAmelCase_ = []
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCamelCase_ = pd.read_csv(
"""https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"""
"""position_salaries.csv"""
)
lowerCamelCase_ = dataset.iloc[:, 1:2].values
lowerCamelCase_ = dataset.iloc[:, 2].values
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)
lowerCamelCase_ = PolynomialFeatures(degree=4)
lowerCamelCase_ = poly_reg.fit_transform(X)
lowerCamelCase_ = LinearRegression()
pol_reg.fit(X_poly, y)
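# the linear model is now fit on degree-4 polynomial features of the position level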
def lowerCamelCase ( ) -> List[str]:
plt.scatter(a_ , a_ , color='red' )
plt.plot(a_ , pol_reg.predict(poly_reg.fit_transform(a_ ) ) , color='blue' )
plt.title('Truth or Bluff (Polynomial Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
lowerCamelCase_ = 6_5_5_2_1
def lowerCamelCase ( a_ ) -> int:
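# Adler-32: two running sums modulo 65521, packed into one 32-bit value as (b << 16) | a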
lowerCAmelCase_ = 1
lowerCAmelCase_ = 0
for plain_chr in plain_text:
lowerCAmelCase_ = (a + ord(plain_chr )) % MOD_ADLER
lowerCAmelCase_ = (b + a) % MOD_ADLER
return (b << 16) | a
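if __name__ == "__main__":
# quick sanity check, assuming the function above is the usual adler32:
# the standard library's zlib.adler32 computes the same checksum over bytes
import zlib
assert adler32('Wikipedia' ) == zlib.adler32(b'Wikipedia' )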
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a_ ( a_ , a_ ):
'''simple docstring'''
@register_to_config
def __init__( self , *,
lowercase_ = 4 , lowercase_ = 7_6_8 , lowercase_ , lowercase_ , ) -> Tuple:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ = nn.Parameter(torch.zeros(lowercase_ ) )
# parameters for additional clip time embeddings
lowerCAmelCase_ = nn.Linear(lowercase_ , lowercase_ )
lowerCAmelCase_ = nn.Linear(lowercase_ , lowercase_ )
# parameters for encoder hidden states
lowerCAmelCase_ = clip_extra_context_tokens
lowerCAmelCase_ = nn.Linear(
lowercase_ , self.clip_extra_context_tokens * cross_attention_dim )
lowerCAmelCase_ = nn.Linear(lowercase_ , lowercase_ )
lowerCAmelCase_ = nn.LayerNorm(lowercase_ )
def _lowercase ( self , *, lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
lowerCAmelCase_ = image_embeddings.shape[0]
lowerCAmelCase_ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
lowerCAmelCase_ = classifier_free_guidance_embeddings.expand(
lowercase_ , -1 )
lowerCAmelCase_ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
lowerCAmelCase_ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
lowerCAmelCase_ = self.embedding_proj(lowercase_ )
lowerCAmelCase_ = self.clip_image_embeddings_project_to_time_embeddings(lowercase_ )
lowerCAmelCase_ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
lowerCAmelCase_ = self.clip_extra_context_tokens_proj(lowercase_ )
lowerCAmelCase_ = clip_extra_context_tokens.reshape(lowercase_ , -1 , self.clip_extra_context_tokens )
lowerCAmelCase_ = clip_extra_context_tokens.permute(0 , 2 , 1 )
lowerCAmelCase_ = self.encoder_hidden_states_proj(lowercase_ )
lowerCAmelCase_ = self.text_encoder_hidden_states_norm(lowercase_ )
lowerCAmelCase_ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def lowerCamelCase ( a_ , a_=False ) -> Tuple:
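# translate parameter names from the original checkpoint into the HF Segformer naming scheme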
lowerCAmelCase_ = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('head' ):
lowerCAmelCase_ = 'segformer.encoder.' + key
if key.startswith('backbone' ):
lowerCAmelCase_ = key.replace('backbone' , 'segformer.encoder' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase_ = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCAmelCase_ = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(idx )-1}''' )
if "norm" in key:
lowerCAmelCase_ = key.replace('norm' , 'layer_norm' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase_ = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
lowerCAmelCase_ = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(idx )-1}''' )
if "layer_norm1" in key:
lowerCAmelCase_ = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCAmelCase_ = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase_ = key[key.find('block' ) + len('block' )]
lowerCAmelCase_ = key.replace(F'''block{idx}''' , F'''block.{int(idx )-1}''' )
if "attn.q" in key:
lowerCAmelCase_ = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCAmelCase_ = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCAmelCase_ = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCAmelCase_ = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCAmelCase_ = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCAmelCase_ = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCAmelCase_ = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCAmelCase_ = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase_ = key[key.find('linear_c' ) + len('linear_c' )]
lowerCAmelCase_ = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(idx )-1}''' )
if key.startswith('head' ):
lowerCAmelCase_ = key.replace('head' , 'classifier' )
lowerCAmelCase_ = value
return new_state_dict
def lowerCamelCase ( a_ , a_ ) -> Union[str, Any]:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase_ = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase_ = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase_ = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase_ = kv_bias[
config.hidden_sizes[i] :
]
def lowerCamelCase ( ) -> Optional[int]:
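# standard COCO test image (two cats on a couch) used to sanity-check conversions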
lowerCAmelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase_ = Image.open(requests.get(a_ , stream=a_ ).raw )
return image
@torch.no_grad()
def lowerCamelCase ( a_ , a_ , a_ ) -> int:
lowerCAmelCase_ = SegformerConfig()
lowerCAmelCase_ = False
# set attributes based on model_name
lowerCAmelCase_ = 'huggingface/label-files'
if "segformer" in model_name:
lowerCAmelCase_ = model_name[len('segformer.' ) : len('segformer.' ) + 2]
if "ade" in model_name:
lowerCAmelCase_ = 150
lowerCAmelCase_ = 'ade20k-id2label.json'
lowerCAmelCase_ = (1, 150, 128, 128)
elif "city" in model_name:
lowerCAmelCase_ = 19
lowerCAmelCase_ = 'cityscapes-id2label.json'
lowerCAmelCase_ = (1, 19, 128, 128)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
lowerCAmelCase_ = True
lowerCAmelCase_ = model_name[4:6]
lowerCAmelCase_ = 1_000
lowerCAmelCase_ = 'imagenet-1k-id2label.json'
lowerCAmelCase_ = (1, 1_000)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
lowerCAmelCase_ = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
lowerCAmelCase_ = {int(k ): v for k, v in idalabel.items()}
lowerCAmelCase_ = idalabel
lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 256
elif size == "b2":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 4, 6, 3]
elif size == "b3":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 4, 18, 3]
elif size == "b4":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 8, 27, 3]
elif size == "b5":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
lowerCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=a_ , align=a_ , do_random_crop=a_ )
# prepare image
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=a_ , return_tensors='pt' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )
else:
lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )['state_dict']
# rename keys
lowerCAmelCase_ = rename_keys(a_ , encoder_only=a_ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(a_ , a_ )
# create HuggingFace model and load state dict
if encoder_only:
lowerCAmelCase_ = False
lowerCAmelCase_ = SegformerForImageClassification(a_ )
else:
lowerCAmelCase_ = SegformerForSemanticSegmentation(a_ )
model.load_state_dict(a_ )
model.eval()
# forward pass
lowerCAmelCase_ = model(a_ )
lowerCAmelCase_ = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
else:
lowerCAmelCase_ = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowerCamelCase_ = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
from sklearn.metrics import fa_score
import datasets
lowerCamelCase_ = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
lowerCamelCase_ = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights. Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
lowerCamelCase_ = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=1 , lowercase_="binary" , lowercase_=None ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = fa_score(
lowercase_ , lowercase_ , labels=lowercase_ , pos_label=lowercase_ , average=lowercase_ , sample_weight=lowercase_ )
return {"f1": float(lowercase_ ) if score.size == 1 else score}
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class a_ ( a_ , a_ ):
'''simple docstring'''
__a: Optional[Any] = '''nat'''
__a: int = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , lowercase_=4 , lowercase_=3 , lowercase_=6_4 , lowercase_=[3, 4, 6, 5] , lowercase_=[2, 4, 8, 1_6] , lowercase_=7 , lowercase_=3.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=0.02 , lowercase_=1e-5 , lowercase_=0.0 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = depths
lowerCAmelCase_ = len(lowercase_ )
lowerCAmelCase_ = num_heads
lowerCAmelCase_ = kernel_size
lowerCAmelCase_ = mlp_ratio
lowerCAmelCase_ = qkv_bias
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = drop_path_rate
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase_ = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
lowerCAmelCase_ = layer_scale_init_value
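# expose the stem plus one name per stage so out_features/out_indices can be aligned below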
lowerCAmelCase_ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase_ ) + 1 )]
lowerCAmelCase_ , lowerCAmelCase_ = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class a_ ( a_ ):
'''simple docstring'''
@slow
@require_torch
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
lowerCAmelCase_ = BertTokenizer.from_pretrained('bert-base-uncased' )
lowerCAmelCase_ = bertabert.config.encoder.vocab_size
lowerCAmelCase_ = tokenizer.sep_token_id
lowerCAmelCase_ = tokenizer.cls_token_id
lowerCAmelCase_ = 1_2_8
lowerCAmelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
lowerCAmelCase_ = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
lowerCAmelCase_ = train_dataset.select(range(3_2 ) )
lowerCAmelCase_ = val_dataset.select(range(1_6 ) )
lowerCAmelCase_ = 4
def _map_to_encoder_decoder_inputs(lowercase_ ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowerCAmelCase_ = tokenizer(batch['article'] , padding='max_length' , truncation=lowercase_ , max_length=5_1_2 )
lowerCAmelCase_ = tokenizer(batch['highlights'] , padding='max_length' , truncation=lowercase_ , max_length=1_2_8 )
lowerCAmelCase_ = inputs.input_ids
lowerCAmelCase_ = inputs.attention_mask
lowerCAmelCase_ = outputs.input_ids
lowerCAmelCase_ = outputs.input_ids.copy()
lowerCAmelCase_ = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
lowerCAmelCase_ = outputs.attention_mask
assert all(len(lowercase_ ) == 5_1_2 for x in inputs.input_ids )
assert all(len(lowercase_ ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(lowercase_ ):
lowerCAmelCase_ = pred.label_ids
lowerCAmelCase_ = pred.predictions
# all unnecessary tokens are removed
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
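# exact-match accuracy: fraction of decoded predictions identical to their reference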
lowerCAmelCase_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowercase_ ) )] ) / len(lowercase_ )
return {"accuracy": accuracy}
# map train dataset
lowerCAmelCase_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowercase_ , batch_size=lowercase_ , remove_columns=['article', 'highlights'] , )
train_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
# same for validation dataset
lowerCAmelCase_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowercase_ , batch_size=lowercase_ , remove_columns=['article', 'highlights'] , )
val_dataset.set_format(
type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
lowerCAmelCase_ = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ = SeqaSeqTrainingArguments(
output_dir=lowercase_ , per_device_train_batch_size=lowercase_ , per_device_eval_batch_size=lowercase_ , predict_with_generate=lowercase_ , evaluation_strategy='steps' , do_train=lowercase_ , do_eval=lowercase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowerCAmelCase_ = SeqaSeqTrainer(
model=lowercase_ , args=lowercase_ , compute_metrics=_compute_metrics , train_dataset=lowercase_ , eval_dataset=lowercase_ , tokenizer=lowercase_ , )
# start training
trainer.train()
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCamelCase_ = """pytorch_model.bin"""
lowerCamelCase_ = """pytorch_model.bin.index.json"""
lowerCamelCase_ = """adapter_config.json"""
lowerCamelCase_ = """adapter_model.bin"""
lowerCamelCase_ = """adapter_model.safetensors"""
lowerCamelCase_ = """tf_model.h5"""
lowerCamelCase_ = """tf_model.h5.index.json"""
lowerCamelCase_ = """model.ckpt"""
lowerCamelCase_ = """flax_model.msgpack"""
lowerCamelCase_ = """flax_model.msgpack.index.json"""
lowerCamelCase_ = """model.safetensors"""
lowerCamelCase_ = """model.safetensors.index.json"""
lowerCamelCase_ = """config.json"""
lowerCamelCase_ = """preprocessor_config.json"""
lowerCamelCase_ = FEATURE_EXTRACTOR_NAME
lowerCamelCase_ = """generation_config.json"""
lowerCamelCase_ = """modelcard.json"""
lowerCamelCase_ = """▁"""
lowerCamelCase_ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCamelCase_ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCamelCase_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCamelCase_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def lowerCamelCase ( a_ ) -> Dict:
if version.parse(__version__ ) < version.parse(min_version ):
if "dev" in min_version:
lowerCAmelCase_ = (
'This example requires a source install from HuggingFace Transformers (see '
'`https://huggingface.co/docs/transformers/installation#install-from-source`),'
)
else:
lowerCAmelCase_ = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
from pathlib import Path
import numpy as np
from PIL import Image
def lowerCamelCase ( a_ ):
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b
def lowerCamelCase ( a_ ):
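# threshold at mid-gray: pixels in (127, 255] become foreground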
return (gray > 127) & (gray <= 255)
def lowerCamelCase ( a_ , a_ ):
lowerCAmelCase_ = np.zeros_like(a_ )
lowerCAmelCase_ = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
lowerCAmelCase_ = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
lowerCAmelCase_ = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
lowerCAmelCase_ = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
lowerCamelCase_ = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
lowerCamelCase_ = np.array(Image.open(lena_path))
# kernel to be applied
lowerCamelCase_ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
lowerCamelCase_ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
lowerCamelCase_ = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCamelCase ( a_ ) -> List[str]:
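# accept a tensor, a PIL image or a list of PIL images and return a batched tensor in [-1, 1]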
if isinstance(a_ , torch.Tensor ):
return image
elif isinstance(a_ , PIL.Image.Image ):
lowerCAmelCase_ = [image]
lowerCAmelCase_ = [trans(img.convert('RGB' ) ) for img in image]
lowerCAmelCase_ = torch.stack(a_ )
return image
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCAmelCase_ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
def _lowercase ( self , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should be in [0.0, 1.0] but is {strength}''' )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
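# img2img: skip the start of the schedule and denoise only the last `strength` fraction of steps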
lowerCAmelCase_ = min(int(num_inference_steps * strength ) , lowercase_ )
lowerCAmelCase_ = max(num_inference_steps - init_timestep , 0 )
lowerCAmelCase_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Tuple:
'''simple docstring'''
if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}''' )
lowerCAmelCase_ = image.to(device=lowercase_ , dtype=lowercase_ )
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCAmelCase_ = init_latents.shape
lowerCAmelCase_ = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
# get latents
print('add noise to latents at timestep' , lowercase_ )
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase_ = init_latents
return latents
@torch.no_grad()
def __call__( self , lowercase_ = None , lowercase_ = 0.8 , lowercase_ = 1 , lowercase_ = None , lowercase_ = 0.0 , lowercase_ = 5_0 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
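# 1. Check inputs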
self.check_inputs(lowercase_ )
# 2. Preprocess image
lowerCAmelCase_ = preprocess(lowercase_ )
# 3. set timesteps
self.scheduler.set_timesteps(lowercase_ , device=self.device )
lowerCAmelCase_ , lowerCAmelCase_ = self.get_timesteps(lowercase_ , lowercase_ , self.device )
lowerCAmelCase_ = timesteps[:1].repeat(lowercase_ )
# 4. Prepare latent variables
lowerCAmelCase_ = self.prepare_latents(lowercase_ , lowercase_ , lowercase_ , self.unet.dtype , self.device , lowercase_ )
lowerCAmelCase_ = latents
# 5. Denoising loop
for t in self.progress_bar(lowercase_ ):
# 1. predict noise model_output
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCAmelCase_ = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_ , ).prev_sample
lowerCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase_ = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowercase_ )
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class a_ ( a_ ):
'''simple docstring'''
__a: str = '''facebook/bart-large-mnli'''
__a: Optional[int] = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
__a: List[Any] = '''text_classifier'''
__a: Optional[int] = AutoTokenizer
__a: Optional[Any] = AutoModelForSequenceClassification
__a: Tuple = ['''text''', ['''text''']]
__a: int = ['''text''']
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
super().setup()
lowerCAmelCase_ = self.model.config
lowerCAmelCase_ = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
lowerCAmelCase_ = int(lowercase_ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def _lowercase ( self , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
lowerCAmelCase_ = labels
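# zero-shot NLI: pair the text with one "This example is {label}" hypothesis per candidate label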
return self.pre_processor(
[text] * len(lowercase_ ) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def _lowercase ( self , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = outputs.logits
lowerCAmelCase_ = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
def lowerCamelCase ( a_ ) -> "list[int]":
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
lowerCAmelCase_ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
lowerCAmelCase_ = 1
if upper_limit > 0:
lowerCAmelCase_ = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(i ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
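# e.g. catalan_numbers(5) -> [1, 1, 2, 5, 14, 42]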
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
lowerCamelCase_ = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a_ ( a_ , unittest.TestCase ):
'''simple docstring'''
__a: Optional[int] = KandinskyVaaPriorPipeline
__a: int = ['''prompt''']
__a: str = ['''prompt''', '''negative_prompt''']
__a: int = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
__a: Dict = False
@property
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
return 3_2
@property
def _lowercase ( self ) -> Any:
'''simple docstring'''
return 3_2
@property
def _lowercase ( self ) -> str:
'''simple docstring'''
return self.time_input_dim
@property
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowercase ( self ) -> Dict:
'''simple docstring'''
return 1_0_0
@property
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def _lowercase ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(lowercase_ )
@property
def _lowercase ( self ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : int = {
'num_attention_heads': 2,
'attention_head_dim': 1_2,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
lowerCAmelCase_ : int = PriorTransformer(**lowercase_ )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it does not
lowerCAmelCase_ : Optional[int] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
lowerCAmelCase_ : str = CLIPVisionModelWithProjection(lowercase_ )
return model
@property
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Tuple = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=lowercase_ , do_normalize=lowercase_ , do_resize=lowercase_ , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=2_2_4 , )
return image_processor
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.dummy_prior
lowerCAmelCase_ : Any = self.dummy_image_encoder
lowerCAmelCase_ : int = self.dummy_text_encoder
lowerCAmelCase_ : Optional[Any] = self.dummy_tokenizer
lowerCAmelCase_ : Tuple = self.dummy_image_processor
lowerCAmelCase_ : List[str] = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_0_0_0 , clip_sample=lowercase_ , clip_sample_range=10.0 , )
lowerCAmelCase_ : List[str] = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def _lowercase ( self , lowercase_ , lowercase_=0 ) -> Union[str, Any]:
'''simple docstring'''
if str(lowercase_ ).startswith('mps' ):
lowerCAmelCase_ : Any = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase_ : List[Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase_ : List[str] = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def _lowercase ( self ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = 'cpu'
lowerCAmelCase_ : List[Any] = self.get_dummy_components()
lowerCAmelCase_ : int = self.pipeline_class(**lowercase_ )
lowerCAmelCase_ : Tuple = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase_ : List[str] = pipe(**self.get_dummy_inputs(lowercase_ ) )
lowerCAmelCase_ : Optional[Any] = output.image_embeds
lowerCAmelCase_ : Any = pipe(
**self.get_dummy_inputs(lowercase_ ) , return_dict=lowercase_ , )[0]
lowerCAmelCase_ : List[Any] = image[0, -1_0:]
lowerCAmelCase_ : int = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
lowerCAmelCase_ : Any = np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : List[str] = torch_device == 'cpu'
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : int = False
self._test_inference_batch_single_identical(
test_max_difference=lowercase_ , relax_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
@skip_mps
def _lowercase ( self ) -> int:
'''simple docstring'''
lowerCAmelCase_ : int = torch_device == 'cpu'
lowerCAmelCase_ : Union[str, Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=lowercase_ , test_mean_pixel_difference=lowercase_ , )
| 362 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(a_ )
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , *lowercase_ , **lowercase_ ) -> Any:
'''simple docstring'''
super().__init__(*lowercase_ , **lowercase_ )
self.check_model_type(lowercase_ )
def _lowercase ( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = {}, {}
if padding is not None:
lowerCAmelCase_ = padding
if truncation is not None:
lowerCAmelCase_ = truncation
if top_k is not None:
lowerCAmelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowercase_ , lowercase_ = None , **lowercase_ ) -> int:
'''simple docstring'''
if isinstance(lowercase_ , (Image.Image, str) ) and isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase_ = {'image': image, 'question': question}
else:
lowerCAmelCase_ = image
lowerCAmelCase_ = super().__call__(lowercase_ , **lowercase_ )
return results
def _lowercase ( self , lowercase_ , lowercase_=False , lowercase_=False ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = load_image(inputs['image'] )
lowerCAmelCase_ = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=lowercase_ , truncation=lowercase_ )
lowerCAmelCase_ = self.image_processor(images=lowercase_ , return_tensors=self.framework )
model_inputs.update(lowercase_ )
return model_inputs
def _lowercase ( self , lowercase_ ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = self.model(**lowercase_ )
return model_outputs
def _lowercase ( self , lowercase_ , lowercase_=5 ) -> Any:
'''simple docstring'''
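        # clamp top_k so it never exceeds the number of labels the model predicts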
if top_k > self.model.config.num_labels:
lowerCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
lowerCAmelCase_ = model_outputs.logits.sigmoid()[0]
lowerCAmelCase_ , lowerCAmelCase_ = probs.topk(lowercase_ )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
lowerCAmelCase_ = scores.tolist()
lowerCAmelCase_ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
| 14 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
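# Base import structure: config and slow tokenizer are always available; the
# framework-specific classes below are registered only when their optional
# dependencies can be imported.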
lowerCamelCase_ : List[Any] = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : int = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 363 |
def lowerCamelCase ( a_ ) -> bool:
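    # A directed graph contains a cycle iff DFS finds a back edge: an edge to a
    # vertex that is still on the current recursion stack.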
lowerCAmelCase_ = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
lowerCAmelCase_ = set()
return any(
node not in visited and depth_first_search(a_ , a_ , a_ , a_ )
for node in graph )
def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> bool:
visited.add(a_ )
rec_stk.add(a_ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a_ , a_ , a_ , a_ ):
return True
elif node in rec_stk:
return True
    # The node needs to be removed from the recursion stack before the function ends
rec_stk.remove(a_ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 14 | 0 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase_ = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 364 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( a_ , a_ , a_ , unittest.TestCase ):
'''simple docstring'''
__a: int = StableDiffusionInpaintPipeline
__a: int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__a: Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__a: int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__a: List[str] = frozenset([] )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowercase_ , )
lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=lowercase_ )
torch.manual_seed(0 )
lowerCAmelCase_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
lowerCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
lowerCAmelCase_ = CLIPTextModel(lowercase_ )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _lowercase ( self , lowercase_ , lowercase_=0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ = Image.fromarray(np.uinta(lowercase_ ) ).convert('RGB' ).resize((6_4, 6_4) )
lowerCAmelCase_ = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((6_4, 6_4) )
if str(lowercase_ ).startswith('mps' ):
lowerCAmelCase_ = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = StableDiffusionInpaintPipeline(**lowercase_ )
lowerCAmelCase_ = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase_ = self.get_dummy_inputs(lowercase_ )
lowerCAmelCase_ = sd_pipe(**lowercase_ ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCAmelCase_ = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(lowercase_ , safety_checker=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , )
lowerCAmelCase_ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(
lowercase_ , torch_dtype=torch.floataa , safety_checker=lowercase_ , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , )
lowerCAmelCase_ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase_ = PNDMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' )
lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(
lowercase_ , safety_checker=lowercase_ , scheduler=lowercase_ , torch_dtype=torch.floataa , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
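        # slice attention one step at a time and offload submodules to the CPU
        # sequentially to minimize peak GPU memory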
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
| 14 | 0 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
lowerCamelCase_ = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def lowerCamelCase ( a_ ) -> Optional[Any]:
lowerCAmelCase_ = {}
state_dict.pop('pixel_mean' , a_ )
state_dict.pop('pixel_std' , a_ )
lowerCAmelCase_ = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
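    # hypernetwork MLP layers need index-specific renames, handled below:
    # layers.0 -> proj_in, layers.1 -> layers.0, layers.2 -> proj_out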
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowerCAmelCase_ = key.replace(a_ , a_ )
if re.match(a_ , a_ ):
lowerCAmelCase_ = int(re.match(a_ , a_ ).group(2 ) )
if layer_nb == 0:
lowerCAmelCase_ = key.replace('layers.0' , 'proj_in' )
elif layer_nb == 1:
lowerCAmelCase_ = key.replace('layers.1' , 'layers.0' )
elif layer_nb == 2:
lowerCAmelCase_ = key.replace('layers.2' , 'proj_out' )
lowerCAmelCase_ = value
lowerCAmelCase_ = model_state_dict[
'prompt_encoder.shared_embedding.positional_embedding'
]
return model_state_dict
def lowerCamelCase ( a_ , a_ , a_ , a_="ybelkada/segment-anything" ) -> Union[str, Any]:
lowerCAmelCase_ = hf_hub_download(a_ , F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
lowerCAmelCase_ = SamConfig()
elif "sam_vit_l" in model_name:
lowerCAmelCase_ = SamVisionConfig(
hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
lowerCAmelCase_ = SamConfig(
vision_config=a_ , )
elif "sam_vit_h" in model_name:
lowerCAmelCase_ = SamVisionConfig(
hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
lowerCAmelCase_ = SamConfig(
vision_config=a_ , )
lowerCAmelCase_ = torch.load(a_ , map_location='cpu' )
lowerCAmelCase_ = replace_keys(a_ )
lowerCAmelCase_ = SamImageProcessor()
lowerCAmelCase_ = SamProcessor(image_processor=a_ )
lowerCAmelCase_ = SamModel(a_ )
hf_model.load_state_dict(a_ )
lowerCAmelCase_ = hf_model.to('cuda' )
lowerCAmelCase_ = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
lowerCAmelCase_ = Image.open(requests.get(a_ , stream=a_ ).raw ).convert('RGB' )
lowerCAmelCase_ = [[[400, 650]]]
lowerCAmelCase_ = [[1]]
lowerCAmelCase_ = processor(images=np.array(a_ ) , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
lowerCAmelCase_ = hf_model(**a_ )
lowerCAmelCase_ = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_890_251_159_668
lowerCAmelCase_ = processor(
images=np.array(a_ ) , input_points=a_ , input_labels=a_ , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
lowerCAmelCase_ = hf_model(**a_ )
lowerCAmelCase_ = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_712_603_092_193_604
lowerCAmelCase_ = ((75, 275, 1_725, 850),)
lowerCAmelCase_ = processor(images=np.array(a_ ) , input_boxes=a_ , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
lowerCAmelCase_ = hf_model(**a_ )
lowerCAmelCase_ = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_686_015_605_926_514
# Test with 2 points and 1 image.
lowerCAmelCase_ = [[[400, 650], [800, 650]]]
lowerCAmelCase_ = [[1, 1]]
lowerCAmelCase_ = processor(
images=np.array(a_ ) , input_points=a_ , input_labels=a_ , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
lowerCAmelCase_ = hf_model(**a_ )
lowerCAmelCase_ = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
lowerCamelCase_ = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
    parser.add_argument(
        """--model_name""",
        default="""sam_vit_h_4b8939""",
        choices=choices,
        type=str,
        help="""Name of the SAM checkpoint to convert.""",
    )
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    parser.add_argument(
        """--model_hub_id""",
        default="""ybelkada/segment-anything""",
        type=str,
        help="""Hub repo id that hosts the original SAM checkpoints.""",
    )
lowerCamelCase_ = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 365 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class a_ :
'''simple docstring'''
__a: int
__a: int
class a_ :
'''simple docstring'''
def __init__( self , lowercase_ ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = [[] for _ in range(lowercase_ )]
lowerCAmelCase_ = size
def __getitem__( self , lowercase_ ) -> Iterator[Edge]:
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
return self._size
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(lowercase_ , lowercase_ ) )
def _lowercase ( self , lowercase_ , lowercase_ ) -> int | None:
'''simple docstring'''
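        # 0-1 BFS: relax edges with a deque, pushing 0-weight edges to the front
        # and 1-weight edges to the back, so vertices are popped in nondecreasing
        # distance order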
lowerCAmelCase_ = deque([start_vertex] )
lowerCAmelCase_ = [None] * self.size
lowerCAmelCase_ = 0
while queue:
lowerCAmelCase_ = queue.popleft()
lowerCAmelCase_ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowerCAmelCase_ = current_distance + edge.weight
lowerCAmelCase_ = distances[edge.destination_vertex]
if (
isinstance(lowercase_ , lowercase_ )
and new_distance >= dest_vertex_distance
):
continue
lowerCAmelCase_ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a_ ( a_ , unittest.TestCase ):
'''simple docstring'''
__a: Tuple = DiTPipeline
__a: str = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__a: Optional[Any] = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
__a: Optional[int] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__a: Optional[int] = False
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ = TransformeraDModel(
sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowercase_ , activation_fn='gelu-approximate' , num_embeds_ada_norm=1_0_0_0 , norm_type='ada_norm_zero' , norm_elementwise_affine=lowercase_ , )
lowerCAmelCase_ = AutoencoderKL()
lowerCAmelCase_ = DDIMScheduler()
lowerCAmelCase_ = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def _lowercase ( self , lowercase_ , lowercase_=0 ) -> Optional[int]:
'''simple docstring'''
if str(lowercase_ ).startswith('mps' ):
lowerCAmelCase_ = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase_ = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = 'cpu'
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = self.pipeline_class(**lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase_ = self.get_dummy_inputs(lowercase_ )
lowerCAmelCase_ = pipe(**lowercase_ ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 1_6, 1_6, 3) )
lowerCAmelCase_ = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
lowerCAmelCase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowercase_ , 1e-3 )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=lowercase_ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
lowerCAmelCase_ = ['vase', 'umbrella', 'white shark', 'white wolf']
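        # map the human-readable ImageNet class names to the label ids DiT expects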
lowerCAmelCase_ = pipe.get_label_ids(lowercase_ )
lowerCAmelCase_ = pipe(lowercase_ , generator=lowercase_ , num_inference_steps=4_0 , output_type='np' ).images
for word, image in zip(lowercase_ , lowercase_ ):
lowerCAmelCase_ = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
lowerCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
lowerCAmelCase_ = ['vase', 'umbrella']
lowerCAmelCase_ = pipe.get_label_ids(lowercase_ )
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(lowercase_ , generator=lowercase_ , num_inference_steps=2_5 , output_type='np' ).images
for word, image in zip(lowercase_ , lowercase_ ):
lowerCAmelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
| 366 |
from __future__ import annotations
lowerCamelCase_ = 1_0
def lowerCamelCase ( a_ ) -> list[int]:
lowerCAmelCase_ = 1
lowerCAmelCase_ = max(a_ )
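    # LSD radix sort: bucket the numbers by each decimal digit in turn, from the
    # least significant digit up to the most significant digit of the maximum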
while placement <= max_digit:
# declare and initialize empty buckets
lowerCAmelCase_ = [[] for _ in range(a_ )]
# split list_of_ints between the buckets
for i in list_of_ints:
            lowerCAmelCase_ = (i // placement) % RADIX  # integer division avoids float precision errors on large ints
buckets[tmp].append(a_ )
# put each buckets' contents into list_of_ints
lowerCAmelCase_ = 0
for b in range(a_ ):
for i in buckets[b]:
lowerCAmelCase_ = i
a += 1
        # move to the next (more significant) digit
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
from statistics import mean
import numpy as np
def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> list:
lowerCAmelCase_ = 0
# Number of processes finished
lowerCAmelCase_ = 0
    # Tracks whether each process has finished:
    # 0 means the process has not finished yet, 1 means it has.
lowerCAmelCase_ = [0] * no_of_process
# List to include calculation results
lowerCAmelCase_ = [0] * no_of_process
# Sort by arrival time.
lowerCAmelCase_ = [burst_time[i] for i in np.argsort(a_ )]
lowerCAmelCase_ = [process_name[i] for i in np.argsort(a_ )]
arrival_time.sort()
while no_of_process > finished_process_count:
lowerCAmelCase_ = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
lowerCAmelCase_ = arrival_time[i]
lowerCAmelCase_ = 0
    # Index of the process currently selected to run
lowerCAmelCase_ = 0
    # Tracks the highest response ratio found so far.
lowerCAmelCase_ = 0
for i in range(0 , a_ ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
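                # HRRN response ratio = (waiting time + burst time) / burst time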
lowerCAmelCase_ = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
lowerCAmelCase_ = temp
lowerCAmelCase_ = i
# Calculate the turn around time
lowerCAmelCase_ = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
lowerCAmelCase_ = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> list:
lowerCAmelCase_ = [0] * no_of_process
for i in range(0 , a_ ):
lowerCAmelCase_ = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
lowerCamelCase_ = 5
lowerCamelCase_ = ["""A""", """B""", """C""", """D""", """E"""]
lowerCamelCase_ = [1, 2, 3, 4, 5]
lowerCamelCase_ = [1, 2, 3, 4, 5]
lowerCamelCase_ = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
lowerCamelCase_ = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''')
| 367 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def lowerCamelCase ( a_ , a_ , a_ , a_ , a_ ) -> List[Any]:
# load base model
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
lowerCAmelCase_ = load_file(a_ )
lowerCAmelCase_ = []
# directly update weight in diffusers model
for key in state_dict:
        # it is suggested to print out the key; it will usually look like the example below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # alpha has already been set beforehand, so the .alpha entries can simply be skipped
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowerCAmelCase_ = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' )
lowerCAmelCase_ = pipeline.text_encoder
else:
lowerCAmelCase_ = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' )
lowerCAmelCase_ = pipeline.unet
# find the target layer
lowerCAmelCase_ = layer_infos.pop(0 )
while len(a_ ) > -1:
try:
lowerCAmelCase_ = curr_layer.__getattr__(a_ )
if len(a_ ) > 0:
lowerCAmelCase_ = layer_infos.pop(0 )
elif len(a_ ) == 0:
break
except Exception:
if len(a_ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowerCAmelCase_ = layer_infos.pop(0 )
lowerCAmelCase_ = []
if "lora_down" in key:
pair_keys.append(key.replace('lora_down' , 'lora_up' ) )
pair_keys.append(a_ )
else:
pair_keys.append(a_ )
pair_keys.append(key.replace('lora_up' , 'lora_down' ) )
# update weight
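        # merge as W = W0 + alpha * (lora_up @ lora_down); 4-D conv weights are
        # squeezed to 2-D for the matmul and unsqueezed back to 4-D afterwards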
if len(state_dict[pair_keys[0]].shape ) == 4:
lowerCAmelCase_ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
lowerCAmelCase_ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(a_ , a_ ).unsqueeze(2 ).unsqueeze(3 )
else:
lowerCAmelCase_ = state_dict[pair_keys[0]].to(torch.floataa )
lowerCAmelCase_ = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(a_ , a_ )
# update visited list
for item in pair_keys:
visited.append(a_ )
return pipeline
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = args.base_model_path
lowerCamelCase_ = args.checkpoint_path
lowerCamelCase_ = args.dump_path
lowerCamelCase_ = args.lora_prefix_unet
lowerCamelCase_ = args.lora_prefix_text_encoder
lowerCamelCase_ = args.alpha
lowerCamelCase_ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
lowerCamelCase_ = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 14 | 0 |
def lowerCamelCase ( a_ , a_ ) -> str:
if not (isinstance(a_ , a_ ) and isinstance(a_ , a_ )):
raise ValueError('longest_common_substring() takes two strings for inputs' )
lowerCAmelCase_ = len(a_ )
lowerCAmelCase_ = len(a_ )
lowerCAmelCase_ = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
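    # dp[i][j] = length of the longest common suffix of texta[:i] and texta[:j];
    # the longest common substring ends wherever this value is maximal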
lowerCAmelCase_ = 0
lowerCAmelCase_ = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
lowerCAmelCase_ = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
lowerCAmelCase_ = i
lowerCAmelCase_ = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowerCamelCase ( a_ ) -> Any:
lowerCAmelCase_ = tmp_path / 'file.csv'
lowerCAmelCase_ = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20\n ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> List[Any]:
lowerCAmelCase_ = tmp_path / 'malformed_file.csv'
lowerCAmelCase_ = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20,\n ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ , a_ ) -> List[str]:
lowerCAmelCase_ = tmp_path / 'csv_with_image.csv'
lowerCAmelCase_ = textwrap.dedent(
F'''\
image
{image_file}
''' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> int:
lowerCAmelCase_ = tmp_path / 'csv_with_label.csv'
lowerCAmelCase_ = textwrap.dedent(
'\\n label\n good\n bad\n good\n ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> Union[str, Any]:
lowerCAmelCase_ = tmp_path / 'csv_with_int_list.csv'
lowerCAmelCase_ = textwrap.dedent(
'\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
def lowerCamelCase ( a_ , a_ , a_ ) -> Optional[Any]:
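    # generating tables from a well-formed CSV together with a malformed one
    # should raise a tokenizing error and log which file failed to parse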
lowerCAmelCase_ = Csv()
lowerCAmelCase_ = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(a_ , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(a_ ) in record.message
for record in caplog.records )
@require_pil
def lowerCamelCase ( a_ ) -> Optional[Any]:
with open(a_ , encoding='utf-8' ) as f:
lowerCAmelCase_ = f.read().splitlines()[1]
lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_image]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
lowerCAmelCase_ = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def lowerCamelCase ( a_ ) -> int:
with open(a_ , encoding='utf-8' ) as f:
lowerCAmelCase_ = f.read().splitlines()[1:]
lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_label]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
lowerCAmelCase_ = pa_table.to_pydict()['label']
assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(a_ ) for label in labels]
def lowerCamelCase ( a_ ) -> Union[str, Any]:
lowerCAmelCase_ = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda a_ : [int(a_ ) for i in x.split()]} )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_int_list]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
lowerCAmelCase_ = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 14 | 0 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
) | 369 |
from maths.prime_factors import prime_factors
def lowerCamelCase ( a_ ) -> int:
if not isinstance(a_ , a_ ):
lowerCAmelCase_ = F'''Input value of [number={number}] must be an integer'''
raise TypeError(a_ )
if number < 1:
raise ValueError('Input must be a positive integer' )
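    # (-1) raised to the number of prime factors counted with multiplicity:
    # +1 for an even count, -1 for an odd count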
return -1 if len(prime_factors(a_ ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
from __future__ import annotations
def lowerCamelCase ( a_ , a_ ) -> list[str]:
if partitions <= 0:
raise ValueError('partitions must be a positive number!' )
if partitions > number_of_bytes:
raise ValueError('partitions can not > number_of_bytes!' )
lowerCAmelCase_ = number_of_bytes // partitions
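    # every partition gets an equal share of bytes; the last partition absorbs
    # any remainder so the ranges cover exactly number_of_bytes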
lowerCAmelCase_ = []
for i in range(a_ ):
lowerCAmelCase_ = i * bytes_per_partition + 1
lowerCAmelCase_ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCamelCase ( a_ , a_ ) -> Tuple:
lowerCAmelCase_ = XCLIPTextConfig()
# derive patch size from model name
lowerCAmelCase_ = model_name.find('patch' )
lowerCAmelCase_ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
lowerCAmelCase_ = XCLIPVisionConfig(patch_size=a_ , num_frames=a_ )
if "large" in model_name:
lowerCAmelCase_ = 768
lowerCAmelCase_ = 3_072
lowerCAmelCase_ = 12
lowerCAmelCase_ = 1_024
lowerCAmelCase_ = 4_096
lowerCAmelCase_ = 16
lowerCAmelCase_ = 24
lowerCAmelCase_ = 768
lowerCAmelCase_ = 3_072
if model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ = 336
lowerCAmelCase_ = XCLIPConfig.from_text_vision_configs(a_ , a_ )
if "large" in model_name:
lowerCAmelCase_ = 768
return config
def lowerCamelCase ( a_ ) -> List[str]:
# text encoder
if name == "token_embedding.weight":
lowerCAmelCase_ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
lowerCAmelCase_ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
lowerCAmelCase_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
lowerCAmelCase_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
lowerCAmelCase_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
lowerCAmelCase_ = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
lowerCAmelCase_ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
lowerCAmelCase_ = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
lowerCAmelCase_ = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
lowerCAmelCase_ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
lowerCAmelCase_ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
lowerCAmelCase_ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
lowerCAmelCase_ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
lowerCAmelCase_ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
lowerCAmelCase_ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
lowerCAmelCase_ = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
lowerCAmelCase_ = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
lowerCAmelCase_ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
lowerCAmelCase_ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
lowerCAmelCase_ = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
lowerCAmelCase_ = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
lowerCAmelCase_ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def lowerCamelCase ( a_ , a_ ) -> Dict:
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ = orig_state_dict.pop(a_ )
if "attn.in_proj" in key:
lowerCAmelCase_ = key.split('.' )
if key.startswith('visual' ):
lowerCAmelCase_ = key_split[3]
lowerCAmelCase_ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[
:dim
]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[
-dim:
]
else:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
elif key.startswith('mit' ):
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.vision_config.mit_hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[dim : dim * 2, :]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[dim : dim * 2]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = rename_key(a_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
lowerCAmelCase_ = val.T
lowerCAmelCase_ = val
return orig_state_dict
def lowerCamelCase ( a_ ) -> List[str]:
if num_frames == 8:
lowerCAmelCase_ = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
lowerCAmelCase_ = 'eating_spaghetti.npy'
elif num_frames == 32:
lowerCAmelCase_ = 'eating_spaghetti_32_frames.npy'
lowerCAmelCase_ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=a_ , repo_type='dataset' , )
lowerCAmelCase_ = np.load(a_ )
return list(a_ )
def lowerCamelCase ( a_ , a_=None , a_=False ) -> List[Any]:
lowerCAmelCase_ = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
lowerCAmelCase_ = model_to_url[model_name]
lowerCAmelCase_ = 8
if "16-frames" in model_name:
lowerCAmelCase_ = 16
elif "shot" in model_name:
lowerCAmelCase_ = 32
lowerCAmelCase_ = get_xclip_config(a_ , a_ )
lowerCAmelCase_ = XCLIPModel(a_ )
model.eval()
if "drive" in checkpoint_url:
lowerCAmelCase_ = 'pytorch_model.bin'
gdown.cached_download(a_ , a_ , quiet=a_ )
lowerCAmelCase_ = torch.load(a_ , map_location='cpu' )['model']
else:
lowerCAmelCase_ = torch.hub.load_state_dict_from_url(a_ )['model']
lowerCAmelCase_ = convert_state_dict(a_ , a_ )
lowerCAmelCase_ = XCLIPModel(a_ )
lowerCAmelCase_ , lowerCAmelCase_ = model.load_state_dict(a_ , strict=a_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
lowerCAmelCase_ = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
lowerCAmelCase_ = VideoMAEImageProcessor(size=a_ )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase_ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase_ = XCLIPProcessor(image_processor=a_ , tokenizer=a_ )
lowerCAmelCase_ = prepare_video(a_ )
lowerCAmelCase_ = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=a_ , return_tensors='pt' , padding=a_ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
lowerCAmelCase_ = model(**a_ )
# Verify outputs
lowerCAmelCase_ = outputs.logits_per_video
lowerCAmelCase_ = logits_per_video.softmax(dim=1 )
print('Probs:' , a_ )
# kinetics-400
if model_name == "xclip-base-patch32":
lowerCAmelCase_ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
lowerCAmelCase_ = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
lowerCAmelCase_ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
lowerCAmelCase_ = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
lowerCAmelCase_ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowerCAmelCase_ = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowerCAmelCase_ = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowerCAmelCase_ = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowerCAmelCase_ = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowerCAmelCase_ = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowerCAmelCase_ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowerCAmelCase_ = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowerCAmelCase_ = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(a_ , a_ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a_ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(a_ , organization='nielsr' )
processor.push_to_hub(a_ , organization='nielsr' )
slow_tokenizer.push_to_hub(a_ , organization='nielsr' )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 14 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
lowerCAmelCase_ = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(lowercase_ )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = 'sshleifer/tiny-gpt2'
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCAmelCase_ = PyTorchBenchmark(lowercase_ )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = 'sgugger/tiny-distilbert-classification'
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , only_pretrain_model=lowercase_ , )
lowerCAmelCase_ = PyTorchBenchmark(lowercase_ )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = 'sshleifer/tiny-gpt2'
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , torchscript=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCAmelCase_ = PyTorchBenchmark(lowercase_ )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = 'sshleifer/tiny-gpt2'
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , fpaa=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCAmelCase_ = PyTorchBenchmark(lowercase_ )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = 'sshleifer/tiny-gpt2'
lowerCAmelCase_ = AutoConfig.from_pretrained(lowercase_ )
# set architectures equal to `None`
lowerCAmelCase_ = None
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCAmelCase_ = PyTorchBenchmark(lowercase_ , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = 'sshleifer/tiny-gpt2'
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCAmelCase_ = PyTorchBenchmark(lowercase_ )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = 'sshleifer/tiny-gpt2'
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase_ , multi_process=lowercase_ , )
lowerCAmelCase_ = PyTorchBenchmark(lowercase_ )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowercase ( self ) -> int:
'''simple docstring'''
lowerCAmelCase_ = 'sshleifer/tiny-gpt2'
lowerCAmelCase_ = AutoConfig.from_pretrained(lowercase_ )
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCAmelCase_ = PyTorchBenchmark(lowercase_ , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = 'sshleifer/tinier_bart'
lowerCAmelCase_ = AutoConfig.from_pretrained(lowercase_ )
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCAmelCase_ = PyTorchBenchmark(lowercase_ , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = 'sshleifer/tiny-gpt2'
lowerCAmelCase_ = AutoConfig.from_pretrained(lowercase_ )
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCAmelCase_ = PyTorchBenchmark(lowercase_ , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = 'sshleifer/tinier_bart'
lowerCAmelCase_ = AutoConfig.from_pretrained(lowercase_ )
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
lowerCAmelCase_ = PyTorchBenchmark(lowercase_ , configs=[config] )
lowerCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , save_to_csv=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] ,
                inference_time_csv_file=os.path.join(lowercase_ , 'inf_time.csv' ) ,
                train_memory_csv_file=os.path.join(lowercase_ , 'train_mem.csv' ) ,
                inference_memory_csv_file=os.path.join(lowercase_ , 'inf_mem.csv' ) ,
                train_time_csv_file=os.path.join(lowercase_ , 'train_time.csv' ) ,
                env_info_csv_file=os.path.join(lowercase_ , 'env.csv' ) ,
                multi_process=lowercase_ , )
lowerCAmelCase_ = PyTorchBenchmark(lowercase_ )
benchmark.run()
self.assertTrue(Path(os.path.join(lowercase_ , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_ , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_ , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_ , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_ , 'env.csv' ) ).exists() )
def _lowercase ( self ) -> int:
'''simple docstring'''
lowerCAmelCase_ = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(lowercase_ ):
self.assertTrue(hasattr(lowercase_ , 'sequential' ) )
self.assertTrue(hasattr(lowercase_ , 'cumulative' ) )
self.assertTrue(hasattr(lowercase_ , 'current' ) )
self.assertTrue(hasattr(lowercase_ , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase_ , 'log.txt' ) , log_print=lowercase_ , trace_memory_line_by_line=lowercase_ , multi_process=lowercase_ , )
lowerCAmelCase_ = PyTorchBenchmark(lowercase_ )
lowerCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowercase_ , 'log.txt' ) ).exists() )
| 371 |
def lowerCamelCase ( a_ , a_ ) -> List[Any]:
lowerCAmelCase_ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowerCamelCase ( a_ , a_ , a_ ) -> Union[str, Any]:
lowerCAmelCase_ = 0
while b > 0:
if b & 1:
lowerCAmelCase_ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
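def _demo_binary_multiply(a: int, b: int) -> int:
    # Minimal self-contained sketch of the shift-and-add ("Russian peasant")
    # multiplication implemented above, with readable names for clarity; the
    # helper name is illustrative and not part of the original module.
    res = 0
    while b > 0:
        if b & 1:  # add the current shifted multiplicand when this bit of b is set
            res += a
        a += a  # double the multiplicand
        b >>= 1  # advance to the next bit of the multiplier
    return res


assert _demo_binary_multiply(5, 6) == 30
assert _demo_binary_multiply(7, 0) == 0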
| 14 | 0 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
lowerCamelCase_ = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class a_ ( tr.AbstractTransform ):
'''simple docstring'''
def __init__( self , lowercase_ = " " ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = sentence_delimiter
def _lowercase ( self , lowercase_ ) -> str:
'''simple docstring'''
return list(lowercase_ )
def _lowercase ( self , lowercase_ ) -> int:
'''simple docstring'''
lowerCAmelCase_ = []
for sent_idx, sentence in enumerate(lowercase_ ):
chars.extend(self.process_string(lowercase_ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowercase_ ) - 1:
chars.append(self.sentence_delimiter )
return chars
lowerCamelCase_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
lowerCamelCase_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
lowerCamelCase_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
lowerCamelCase_ = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
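For example, with reference "abc" and prediction "axc" (one substitution and no deletions or insertions), CER = 1 / 3.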
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system, with a CER of 0 being a perfect score.
"""
lowerCamelCase_ = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_=False ) -> Any:
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
lowercase_ , lowercase_ , truth_transform=lowercase_ , hypothesis_transform=lowercase_ , )["wer"]
lowerCAmelCase_ = 0
lowerCAmelCase_ = 0
for prediction, reference in zip(lowercase_ , lowercase_ ):
lowerCAmelCase_ = jiwer.compute_measures(
lowercase_ , lowercase_ , truth_transform=lowercase_ , hypothesis_transform=lowercase_ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
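# Minimal self-contained illustration of the character-level idea; hedged: the
# metric above delegates the real computation to jiwer, and this sketch only
# mirrors the CER formula with a plain Levenshtein dynamic program.
def _demo_cer(reference: str, prediction: str) -> float:
    dist = list(range(len(prediction) + 1))
    for i in range(1, len(reference) + 1):
        prev, dist[0] = dist[0], i
        for j in range(1, len(prediction) + 1):
            cost = 0 if reference[i - 1] == prediction[j - 1] else 1
            # deletion, insertion, or substitution of one character
            prev, dist[j] = dist[j], min(dist[j] + 1, dist[j - 1] + 1, prev + cost)
    return dist[-1] / len(reference)


assert abs(_demo_cer('abc', 'axc') - 1 / 3) < 1e-9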
| 350 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class a_ ( a_ ):
'''simple docstring'''
__a: str = ['''vqvae''']
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ , mel=lowercase_ , vqvae=lowercase_ )
def _lowercase ( self ) -> int:
'''simple docstring'''
return 5_0 if isinstance(self.scheduler , lowercase_ ) else 1_0_0_0
@torch.no_grad()
def __call__( self , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
lowerCAmelCase_ = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowercase_ )
lowerCAmelCase_ = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowerCAmelCase_ = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowerCAmelCase_ = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=lowercase_ , device=self.device , )
lowerCAmelCase_ = noise
lowerCAmelCase_ = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowercase_ , lowercase_ )
lowerCAmelCase_ = self.mel.audio_slice_to_image(lowercase_ )
lowerCAmelCase_ = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
lowerCAmelCase_ = (input_image / 2_5_5) * 2 - 1
lowerCAmelCase_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowerCAmelCase_ = self.vqvae.encode(torch.unsqueeze(lowercase_ , 0 ) ).latent_dist.sample(
generator=lowercase_ )[0]
lowerCAmelCase_ = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , self.scheduler.timesteps[start_step - 1] )
lowerCAmelCase_ = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowerCAmelCase_ = int(mask_start_secs * pixels_per_second )
lowerCAmelCase_ = int(mask_end_secs * pixels_per_second )
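        # mask_start/mask_end are spectrogram columns (time steps); at each denoising
        # step below they pin those columns to the appropriately noised input image.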
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , lowercase_ ):
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ , lowercase_ )['sample']
else:
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample']
if isinstance(self.scheduler , lowercase_ ):
lowerCAmelCase_ = self.scheduler.step(
model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , eta=lowercase_ , generator=lowercase_ , )['prev_sample']
else:
lowerCAmelCase_ = self.scheduler.step(
model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , generator=lowercase_ , )['prev_sample']
if mask is not None:
if mask_start > 0:
lowerCAmelCase_ = mask[:, step, :, :mask_start]
if mask_end > 0:
lowerCAmelCase_ = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
lowerCAmelCase_ = 1 / self.vqvae.config.scaling_factor * images
lowerCAmelCase_ = self.vqvae.decode(lowercase_ )['sample']
lowerCAmelCase_ = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowerCAmelCase_ = (images * 2_5_5).round().astype('uint8' )
        lowerCAmelCase_ = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode='RGB' ).convert('L' ) for _ in images) )
lowerCAmelCase_ = [self.mel.image_to_audio(lowercase_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowercase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase_ ) )
@torch.no_grad()
def _lowercase ( self , lowercase_ , lowercase_ = 5_0 ) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , lowercase_ )
self.scheduler.set_timesteps(lowercase_ )
lowerCAmelCase_ = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
lowerCAmelCase_ = (sample / 2_5_5) * 2 - 1
lowerCAmelCase_ = torch.Tensor(lowercase_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
lowerCAmelCase_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowerCAmelCase_ = self.scheduler.alphas_cumprod[t]
lowerCAmelCase_ = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowerCAmelCase_ = 1 - alpha_prod_t
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample']
lowerCAmelCase_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowerCAmelCase_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowerCAmelCase_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _lowercase ( lowercase_ , lowercase_ , lowercase_ ) -> torch.Tensor:
'''simple docstring'''
lowerCAmelCase_ = acos(torch.dot(torch.flatten(lowercase_ ) , torch.flatten(lowercase_ ) ) / torch.norm(lowercase_ ) / torch.norm(lowercase_ ) )
return sin((1 - alpha) * theta ) * xa / sin(lowercase_ ) + sin(alpha * theta ) * xa / sin(lowercase_ )
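# Note on the slerp above: in the original pipeline the two interpolation
# endpoints are distinct noise tensors (x0 and x1) and both terms are divided
# by sin(theta); alpha=0 returns the first tensor and alpha=1 the second,
# which gives smooth transitions between generated audio clips.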
| 14 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCamelCase_ = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
lowerCamelCase_ = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
lowerCamelCase_ = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="auto" , lowercase_=-1 , lowercase_=0.9 , lowercase_=5 , lowercase_=5_0_0 , lowercase_="gpt2-large" , lowercase_=-1 , lowercase_=1_0_2_4 , lowercase_=2_5 , lowercase_=5 , lowercase_=True , lowercase_=2_5 , ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = compute_mauve(
p_text=lowercase_ , q_text=lowercase_ , p_features=lowercase_ , q_features=lowercase_ , p_tokens=lowercase_ , q_tokens=lowercase_ , num_buckets=lowercase_ , pca_max_data=lowercase_ , kmeans_explained_var=lowercase_ , kmeans_num_redo=lowercase_ , kmeans_max_iter=lowercase_ , featurize_model_name=lowercase_ , device_id=lowercase_ , max_text_length=lowercase_ , divergence_curve_discretization_size=lowercase_ , mauve_scaling_factor=lowercase_ , verbose=lowercase_ , seed=lowercase_ , )
return out
| 351 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCamelCase_ = logging.get_logger(__name__)
def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> Tuple[int, int]:
def constraint_to_multiple_of(a_ , a_ , a_=0 , a_=None ):
lowerCAmelCase_ = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
lowerCAmelCase_ = math.floor(val / multiple ) * multiple
if x < min_val:
lowerCAmelCase_ = math.ceil(val / multiple ) * multiple
return x
lowerCAmelCase_ = (output_size, output_size) if isinstance(a_ , a_ ) else output_size
lowerCAmelCase_ , lowerCAmelCase_ = get_image_size(a_ )
lowerCAmelCase_ , lowerCAmelCase_ = output_size
# determine new height and width
lowerCAmelCase_ = output_height / input_height
lowerCAmelCase_ = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
lowerCAmelCase_ = scale_width
else:
# fit height
lowerCAmelCase_ = scale_height
lowerCAmelCase_ = constraint_to_multiple_of(scale_height * input_height , multiple=a_ )
lowerCAmelCase_ = constraint_to_multiple_of(scale_width * input_width , multiple=a_ )
return (new_height, new_width)
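# Minimal sketch of the snapping rule used above (illustrative helper name;
# the real constraint_to_multiple_of also clamps against min_val/max_val).
def _demo_constrain(val: float, multiple: int) -> int:
    return round(val / multiple) * multiple


# e.g. a 480x640 (h x w) image targeted at (384, 384) with keep_aspect_ratio
# keeps the height scale 0.8, giving (384, 512) after snapping to multiples of 32.
assert (_demo_constrain(0.8 * 480, 32), _demo_constrain(0.8 * 640, 32)) == (384, 512)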
class a_ ( a_ ):
'''simple docstring'''
__a: Union[str, Any] = ['''pixel_values''']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = False , lowercase_ = 1 , lowercase_ = True , lowercase_ = 1 / 2_5_5 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
lowerCAmelCase_ = get_size_dict(lowercase_ )
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = keep_aspect_ratio
lowerCAmelCase_ = ensure_multiple_of
lowerCAmelCase_ = resample
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = rescale_factor
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = False , lowercase_ = 1 , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowerCAmelCase_ = get_resize_output_image_size(
lowercase_ , output_size=(size['height'], size['width']) , keep_aspect_ratio=lowercase_ , multiple=lowercase_ , )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Dict:
'''simple docstring'''
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image:
'''simple docstring'''
lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ = size if size is not None else self.size
lowerCAmelCase_ = get_size_dict(lowercase_ )
lowerCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowerCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowerCAmelCase_ = resample if resample is not None else self.resample
lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ = image_std if image_std is not None else self.image_std
lowerCAmelCase_ = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase_ = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowerCAmelCase_ = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_rescale:
lowerCAmelCase_ = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
lowerCAmelCase_ = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
lowerCAmelCase_ = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowerCAmelCase_ = {'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(lowercase_ ):
lowerCAmelCase_ = target_sizes.numpy()
lowerCAmelCase_ = []
for idx in range(len(lowercase_ ) ):
lowerCAmelCase_ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_ )
lowerCAmelCase_ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase_ )
else:
lowerCAmelCase_ = logits.argmax(dim=1 )
lowerCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 14 | 0 |
lowerCamelCase_ = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
lowerCamelCase_ = {value: key for key, value in encode_dict.items()}
def lowerCamelCase ( a_ ) -> str:
lowerCAmelCase_ = ''
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('encode() accepts only letters of the alphabet and spaces' )
return encoded
def lowerCamelCase ( a_ ) -> str:
if set(a_ ) - {"A", "B", " "} != set():
raise Exception('decode() accepts only \'A\', \'B\' and spaces' )
lowerCAmelCase_ = ''
for word in coded.split():
while len(a_ ) != 0:
decoded += decode_dict[word[:5]]
lowerCAmelCase_ = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
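    # Illustrative round-trip using the tables above; the encode and decode
    # functions share one obfuscated name here, so this check inlines the
    # dictionary lookups instead of calling them.
    sample = 'hello'
    encoded_sample = ''.join(encode_dict[letter] for letter in sample)
    assert encoded_sample == 'AABBBAABAAABABAABABAABBAB'
    assert ''.join(
        decode_dict[encoded_sample[i : i + 5]] for i in range(0, len(encoded_sample), 5)
    ) == sample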
| 352 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowerCamelCase_ = logging.get_logger(__name__)
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , *lowercase_ , **lowercase_ ) -> None:
'''simple docstring'''
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.' , FutureWarning , )
super().__init__(*lowercase_ , **lowercase_ )
| 14 | 0 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
lowerCamelCase_ = 2_5_6_0_4_7
lowerCamelCase_ = 2_5_6_1_4_5
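# These ids are the NLLB language-code tokens for eng_Latn (EN_CODE) and ron_Latn (RO_CODE) used by the tests below.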
@require_sentencepiece
@require_tokenizers
class a_ ( a_ , unittest.TestCase ):
'''simple docstring'''
__a: int = NllbTokenizer
__a: Union[str, Any] = NllbTokenizerFast
__a: Dict = True
__a: int = True
__a: Tuple = {}
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase_ = NllbTokenizer(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self ) -> int:
'''simple docstring'''
lowerCAmelCase_ = NllbTokenizer(lowercase_ , keep_accents=lowercase_ )
lowerCAmelCase_ = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowerCAmelCase_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def _lowercase ( self ) -> int:
'''simple docstring'''
lowerCAmelCase_ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowerCAmelCase_ = tempfile.mkdtemp()
lowerCAmelCase_ = tokenizer_r.save_pretrained(lowercase_ )
lowerCAmelCase_ = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCAmelCase_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase_ = tokenizer_r.from_pretrained(lowercase_ )
lowerCAmelCase_ = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=True
lowerCAmelCase_ = tempfile.mkdtemp()
lowerCAmelCase_ = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
lowerCAmelCase_ = tokenizer_p.save_pretrained(lowercase_ )
# Checks it save with the same files
self.assertSequenceEqual(lowercase_ , lowercase_ )
# Checks everything loads correctly in the same way
lowerCAmelCase_ = tokenizer_r.from_pretrained(lowercase_ )
lowerCAmelCase_ = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
# Save tokenizer rust, legacy_format=False
lowerCAmelCase_ = tempfile.mkdtemp()
lowerCAmelCase_ = tokenizer_r.save_pretrained(lowercase_ , legacy_format=lowercase_ )
lowerCAmelCase_ = tokenizer_p.save_pretrained(lowercase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCAmelCase_ = tokenizer_r.from_pretrained(lowercase_ )
lowerCAmelCase_ = tokenizer_p.from_pretrained(lowercase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowercase_ , lowercase_ ) )
shutil.rmtree(lowercase_ )
@require_torch
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
if not self.test_seqaseq:
return
lowerCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Longer text that will definitely require truncation.
lowerCAmelCase_ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
lowerCAmelCase_ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
lowerCAmelCase_ = tokenizer.prepare_seqaseq_batch(
src_texts=lowercase_ , tgt_texts=lowercase_ , max_length=3 , max_target_length=1_0 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 1_0 )
# max_target_length will default to max_length if not specified
lowerCAmelCase_ = tokenizer.prepare_seqaseq_batch(
lowercase_ , tgt_texts=lowercase_ , max_length=3 , return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
lowerCAmelCase_ = tokenizer.prepare_seqaseq_batch(
src_texts=lowercase_ , max_length=3 , max_target_length=1_0 , return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('decoder_input_ids' , lowercase_ )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
pass
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase_ = [AddedToken('<special>' , lstrip=lowercase_ )]
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ )
lowerCAmelCase_ = tokenizer_r.encode('Hey this is a <special> token' )
lowerCAmelCase_ = tokenizer_r.encode('<special>' , add_special_tokens=lowercase_ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(
lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ )
lowerCAmelCase_ = tokenizer_p.encode('Hey this is a <special> token' )
lowerCAmelCase_ = tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(lowercase_ , lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
'''simple docstring'''
__a: List[Any] = '''facebook/nllb-200-distilled-600M'''
__a: Tuple = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
__a: Dict = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
__a: Any = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def _lowercase ( cls ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' )
lowerCAmelCase_ = 1
return cls
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 2_5_6_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 2_5_6_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 2_5_6_0_5_7 )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowercase_ )
def _lowercase ( self ) -> str:
'''simple docstring'''
self.assertIn(lowercase_ , self.tokenizer.all_special_ids )
# fmt: off
lowerCAmelCase_ = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7]
# fmt: on
lowerCAmelCase_ = self.tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.assertNotIn(self.tokenizer.eos_token , lowercase_ )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = ['this is gunna be a long sentence ' * 2_0]
assert isinstance(src_text[0] , lowercase_ )
lowerCAmelCase_ = 1_0
lowerCAmelCase_ = self.tokenizer(lowercase_ , max_length=lowercase_ , truncation=lowercase_ ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , lowercase_ )
self.assertEqual(len(lowercase_ ) , lowercase_ )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [2_5_6_2_0_3, 3] )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = tempfile.mkdtemp()
lowerCAmelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowercase_ )
lowerCAmelCase_ = NllbTokenizer.from_pretrained(lowercase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase_ )
@require_torch
def _lowercase ( self ) -> int:
'''simple docstring'''
lowerCAmelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
lowerCAmelCase_ = shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual((2, 1_5) , batch.input_ids.shape )
self.assertEqual((2, 1_5) , batch.attention_mask.shape )
lowerCAmelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowercase_ )
self.assertEqual(lowercase_ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _lowercase ( self ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = self.tokenizer(self.src_text , padding=lowercase_ , truncation=lowercase_ , max_length=3 , return_tensors='pt' )
lowerCAmelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=lowercase_ , truncation=lowercase_ , max_length=1_0 , return_tensors='pt' )
lowerCAmelCase_ = targets['input_ids']
lowerCAmelCase_ = shift_tokens_right(
lowercase_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(lowercase_ ) , {
# A, test, EOS, en_XX
'input_ids': [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 2_5_6_0_5_7,
} , )
@require_torch
def _lowercase ( self ) -> int:
'''simple docstring'''
lowerCAmelCase_ = True
lowerCAmelCase_ = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7] )
lowerCAmelCase_ = False
lowerCAmelCase_ = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2] )
| 353 |
from __future__ import annotations
import queue
class a_ :
'''simple docstring'''
def __init__( self , lowercase_ ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = data
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def lowerCamelCase ( ) -> TreeNode:
print('\n********Press N to stop entering at any point of time********\n' )
lowerCAmelCase_ = input('Enter the value of the root node: ' ).strip().lower()
lowerCAmelCase_ = queue.Queue()
lowerCAmelCase_ = TreeNode(int(a_ ) )
q.put(a_ )
while not q.empty():
lowerCAmelCase_ = q.get()
lowerCAmelCase_ = F'''Enter the left node of {node_found.data}: '''
lowerCAmelCase_ = input(a_ ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCAmelCase_ = TreeNode(int(a_ ) )
lowerCAmelCase_ = left_node
q.put(a_ )
lowerCAmelCase_ = F'''Enter the right node of {node_found.data}: '''
lowerCAmelCase_ = input(a_ ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCAmelCase_ = TreeNode(int(a_ ) )
lowerCAmelCase_ = right_node
q.put(a_ )
    # Unreachable in practice: each iteration either returns the tree or enqueues both children.
    raise RuntimeError('Queue emptied before the tree was fully built.' )
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
lowerCAmelCase_ = queue.Queue()
q.put(a_ )
while not q.empty():
lowerCAmelCase_ = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
lowerCAmelCase_ = queue.Queue()
q.put(a_ )
while not q.empty():
lowerCAmelCase_ = []
while not q.empty():
lowerCAmelCase_ = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(a_ )
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
lowerCAmelCase_ = []
lowerCAmelCase_ = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(a_ )
lowerCAmelCase_ = n.left
# end of while means current node doesn't have left child
lowerCAmelCase_ = stack.pop()
# start to traverse its right child
lowerCAmelCase_ = n.right
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
lowerCAmelCase_ = []
lowerCAmelCase_ = node
while n or stack:
while n:
stack.append(a_ )
lowerCAmelCase_ = n.left
lowerCAmelCase_ = stack.pop()
print(n.data , end=',' )
lowerCAmelCase_ = n.right
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
lowerCAmelCase_ , lowerCAmelCase_ = [], []
lowerCAmelCase_ = node
stacka.append(a_ )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCAmelCase_ = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(a_ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def lowerCamelCase ( a_ = "" , a_=50 , a_="*" ) -> str:
if not s:
return "\n" + width * char
lowerCAmelCase_ , lowerCAmelCase_ = divmod(width - len(a_ ) - 2 , 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
lowerCamelCase_ = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 5_0 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 14 | 0 |
from math import factorial
lowerCamelCase_ = {str(digit): factorial(digit) for digit in range(1_0)}
def lowerCamelCase ( a_ ):
if not isinstance(a_ , a_ ):
raise TypeError('Parameter number must be int' )
if number < 0:
raise ValueError('Parameter number must be greater than or equal to 0' )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(a_ ) )
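# Worked example: 145 -> 1! + 4! + 5! = 1 + 24 + 120 = 145, a fixed point, while
# 169 -> 363601 -> 1454 -> 169 forms the classic loop of length three.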
def lowerCamelCase ( a_ = 60 , a_ = 1_000_000 ):
if not isinstance(a_ , a_ ) or not isinstance(a_ , a_ ):
raise TypeError('Parameters chain_length and number_limit must be int' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'Parameters chain_length and number_limit must be greater than 0' )
# the counter for the chains with the exact desired length
lowerCAmelCase_ = 0
# the cached sizes of the previous chains
lowerCAmelCase_ = {}
for start_chain_element in range(1 , a_ ):
# The temporary set will contain the elements of the chain
lowerCAmelCase_ = set()
lowerCAmelCase_ = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
lowerCAmelCase_ = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(a_ )
chain_set_length += 1
lowerCAmelCase_ = digit_factorial_sum(a_ )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
lowerCAmelCase_ = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
| 354 |
import baseaa
def lowerCamelCase ( a_ ) -> bytes:
return baseaa.baaencode(string.encode('utf-8' ) )
def lowerCamelCase ( a_ ) -> str:
return baseaa.baadecode(a_ ).decode('utf-8' )
if __name__ == "__main__":
lowerCamelCase_ = """Hello World!"""
lowerCamelCase_ = baseaa_encode(test)
print(encoded)
lowerCamelCase_ = baseaa_decode(encoded)
print(decoded)
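    # For the string above, the round-trip prints b'SGVsbG8gV29ybGQh' and then
    # "Hello World!" (standard Base64 of the UTF-8 bytes).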
| 14 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class a_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=3_0 , lowercase_=4_0_0 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , lowercase_=True , lowercase_=1 / 2_5_5 , lowercase_=True , ) -> int:
'''simple docstring'''
lowerCAmelCase_ = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = min_resolution
lowerCAmelCase_ = max_resolution
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean
lowerCAmelCase_ = image_std
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = rescale_factor
lowerCAmelCase_ = do_pad
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self , lowercase_ , lowercase_=False ) -> int:
'''simple docstring'''
if not batched:
lowerCAmelCase_ = image_inputs[0]
if isinstance(lowercase_ , Image.Image ):
lowerCAmelCase_ , lowerCAmelCase_ = image.size
else:
lowerCAmelCase_ , lowerCAmelCase_ = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase_ = int(self.size['shortest_edge'] * h / w )
lowerCAmelCase_ = self.size['shortest_edge']
elif w > h:
lowerCAmelCase_ = self.size['shortest_edge']
lowerCAmelCase_ = int(self.size['shortest_edge'] * w / h )
else:
lowerCAmelCase_ = self.size['shortest_edge']
lowerCAmelCase_ = self.size['shortest_edge']
else:
lowerCAmelCase_ = []
for image in image_inputs:
lowerCAmelCase_ , lowerCAmelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase_ = max(lowercase_ , key=lambda lowercase_ : item[0] )[0]
lowerCAmelCase_ = max(lowercase_ , key=lambda lowercase_ : item[1] )[1]
return expected_height, expected_width
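# For example, with size {"shortest_edge": 18} a 40x30 (width x height) image is
# expected to resize so the shorter side becomes 18: height 18 and width
# int(18 * 40 / 30) = 24, mirroring the processor's aspect-preserving resize.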
@require_torch
@require_vision
class a_ ( a_ , unittest.TestCase ):
'''simple docstring'''
__a: Dict = DeformableDetrImageProcessor if is_vision_available() else None
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = DeformableDetrImageProcessingTester(self )
@property
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , 'image_mean' ) )
self.assertTrue(hasattr(lowercase_ , 'image_std' ) )
self.assertTrue(hasattr(lowercase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase_ , 'do_resize' ) )
self.assertTrue(hasattr(lowercase_ , 'do_rescale' ) )
self.assertTrue(hasattr(lowercase_ , 'do_pad' ) )
self.assertTrue(hasattr(lowercase_ , 'size' ) )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , lowercase_ )
lowerCAmelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=lowercase_ )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2, 'longest_edge': 8_4} )
self.assertEqual(image_processor.do_pad , lowercase_ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
lowerCAmelCase_ = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_ = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
lowerCAmelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(lowercase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase_ = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
lowerCAmelCase_ , lowerCAmelCase_ = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowerCAmelCase_ = json.loads(f.read() )
lowerCAmelCase_ = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
lowerCAmelCase_ = DeformableDetrImageProcessor()
lowerCAmelCase_ = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors='pt' )
# verify pixel values
lowerCAmelCase_ = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , lowercase_ )
lowerCAmelCase_ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase_ = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase_ ) )
# verify boxes
lowerCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase_ )
lowerCAmelCase_ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase_ = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase_ ) )
# verify class_labels
lowerCAmelCase_ = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase_ ) )
# verify orig_size
lowerCAmelCase_ = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase_ ) )
# verify size
lowerCAmelCase_ = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase_ ) )
@slow
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowerCAmelCase_ = json.loads(f.read() )
lowerCAmelCase_ = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
lowerCAmelCase_ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowerCAmelCase_ = DeformableDetrImageProcessor(format='coco_panoptic' )
lowerCAmelCase_ = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors='pt' )
# verify pixel values
lowerCAmelCase_ = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , lowercase_ )
lowerCAmelCase_ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase_ , atol=1e-4 ) )
# verify area
lowerCAmelCase_ = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase_ ) )
# verify boxes
lowerCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase_ )
lowerCAmelCase_ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase_ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase_ = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase_ ) )
# verify is_crowd
lowerCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase_ ) )
# verify class_labels
lowerCAmelCase_ = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase_ ) )
# verify masks
lowerCAmelCase_ = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowercase_ )
# verify orig_size
lowerCAmelCase_ = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase_ ) )
# verify size
lowerCAmelCase_ = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase_ ) )
| 355 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config , input_ids , attention_mask=None , head_mask=None ) -> dict:
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
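# For example, with config.pad_token_id == 1 an input_ids row [5, 7, 1] yields
# the attention_mask row [1, 1, 0].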
@require_tf
class a_ :
'''simple docstring'''
__a: Tuple = OPTConfig
__a: Optional[Any] = {}
__a: Tuple = '''gelu'''
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=9_9 , lowercase_=1_6 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=2_0 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=1_6 , lowercase_=1_6 , ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = eos_token_id
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = bos_token_id
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = word_embed_proj_dim
lowerCAmelCase_ = False
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase_ , **self.config_updates , )
lowerCAmelCase_ = prepare_opt_inputs_dict(lowercase_ , lowercase_ )
return config, inputs_dict
def _lowercase ( self , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModel(config=lowercase_ )
lowerCAmelCase_ = inputs_dict['input_ids']
lowerCAmelCase_ = input_ids[:1, :]
lowerCAmelCase_ = inputs_dict['attention_mask'][:1, :]
lowerCAmelCase_ = 1
# first forward pass
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append the new tokens to next_input_ids and the attention mask
lowerCAmelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )[0]
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
@require_tf
class a_ ( a_ , a_ , unittest.TestCase ):
'''simple docstring'''
__a: Optional[int] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__a: Optional[Any] = (TFOPTForCausalLM,) if is_tf_available() else ()
__a: Union[str, Any] = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
__a: int = False
__a: List[Any] = False
__a: Dict = False
__a: List[Any] = 1_0
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=lowercase_ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowercase_ , lowercase_ ):
if hasattr(lowercase_ , 'weight' ):
return embedding_layer.weight
else:
                # Build the word embedding weights here if they do not exist yet,
                # then retry fetching the attribute once they have been built.
model.build()
if hasattr(lowercase_ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
lowerCAmelCase_ = model_class(config=lowercase_ )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase_ )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCAmelCase_ = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowercase_ )
# check that weights remain the same after resizing
lowerCAmelCase_ = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase_ = False
self.assertTrue(lowercase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowercase_ )
lowerCAmelCase_ = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase_ = False
self.assertTrue(lowercase_ )
def _long_tensor(tok_lst ) -> Any:
    return tf.constant(tok_lst , dtype=tf.int32 )
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
__a: Optional[int] = 9_9
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
        lowerCAmelCase_ = tf.ones((4, 1) , dtype=tf.int32 ) * 2
lowerCAmelCase_ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowerCAmelCase_ = input_ids.shape[0]
lowerCAmelCase_ = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModel.from_pretrained('facebook/opt-350m' )
lowerCAmelCase_ = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowerCAmelCase_ = tf.not_equal(lowercase_ , model.config.pad_token_id )
with tf.GradientTape():
lowerCAmelCase_ = model(input_ids=lowercase_ , attention_mask=lowercase_ ).last_hidden_state
lowerCAmelCase_ = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , lowercase_ )
lowerCAmelCase_ = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-3 ) )
lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ )
lowerCAmelCase_ = xla_generate(lowercase_ , lowercase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-2 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
lowerCAmelCase_ = 'facebook/opt-350m'
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(self.path_model )
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ , add_special_tokens=lowercase_ )
lowerCAmelCase_ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowerCAmelCase_ = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ )
lowerCAmelCase_ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-125m'
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCAmelCase_ = []
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-350m'
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
lowerCAmelCase_ = 'left'
# use different length sentences to test batching
lowerCAmelCase_ = [
'Hello, my dog is a little',
'Today, I',
]
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ )
lowerCAmelCase_ = inputs['input_ids']
lowerCAmelCase_ = model.generate(input_ids=lowercase_ , attention_mask=inputs['attention_mask'] )
lowerCAmelCase_ = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(input_ids=lowercase_ )
lowerCAmelCase_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs['attention_mask'][-1] , tf.int64 ) )
lowerCAmelCase_ = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-350m'
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCAmelCase_ = []
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
| 14 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class a_ ( a_ ):
'''simple docstring'''
__a: Dict = '''git_vision_model'''
def __init__( self , lowercase_=7_6_8 , lowercase_=3_0_7_2 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3 , lowercase_=2_2_4 , lowercase_=1_6 , lowercase_="quick_gelu" , lowercase_=1e-5 , lowercase_=0.0 , lowercase_=0.02 , **lowercase_ , ) -> int:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = image_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = hidden_act
@classmethod
def _lowercase ( cls , lowercase_ , **lowercase_ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowercase_ )
lowerCAmelCase_ , lowerCAmelCase_ = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('model_type' ) == "git":
lowerCAmelCase_ = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowercase_ , **lowercase_ )
class a_ ( a_ ):
'''simple docstring'''
__a: List[str] = '''git'''
def __init__( self , lowercase_=None , lowercase_=3_0_5_2_2 , lowercase_=7_6_8 , lowercase_=6 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1_0_2_4 , lowercase_=0.02 , lowercase_=1e-12 , lowercase_=0 , lowercase_="absolute" , lowercase_=True , lowercase_=False , lowercase_=1_0_1 , lowercase_=1_0_2 , lowercase_=None , **lowercase_ , ) -> Any:
'''simple docstring'''
super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , pad_token_id=lowercase_ , **lowercase_ )
if vision_config is None:
lowerCAmelCase_ = {}
logger.info('vision_config is None. initializing the GitVisionConfig with default values.' )
lowerCAmelCase_ = GitVisionConfig(**lowercase_ )
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = position_embedding_type
lowerCAmelCase_ = use_cache
lowerCAmelCase_ = tie_word_embeddings
lowerCAmelCase_ = num_image_with_embedding
lowerCAmelCase_ = bos_token_id
lowerCAmelCase_ = eos_token_id
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ = self.vision_config.to_dict()
lowerCAmelCase_ = self.__class__.model_type
return output
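# A minimal composition sketch (the `a_` classes above correspond to the
# upstream GitVisionConfig and GitConfig; the argument values are illustrative):
#
#     vision_cfg = GitVisionConfig(image_size=224, patch_size=16)
#     cfg = GitConfig(vision_config=vision_cfg.to_dict(), num_hidden_layers=6)
#     assert cfg.to_dict()['vision_config']['patch_size'] == 16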
| 356 |
MOD_ADLER = 6_5_5_2_1
def adler32( plain_text ) -> int:
    # Adler-32: `a` accumulates character values, `b` accumulates the running
    # sums of `a`; both are reduced modulo 65521, the largest prime below 2**16.
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
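# Example, using the standard Adler-32 test vector (zlib.adler32(b"Wikipedia")
# returns the same value):
#
#     >>> adler32("Wikipedia")
#     300286872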
| 14 | 0 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class a_ ( Pipeline ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
    def _sanitize_parameters( self , top_k=None , padding=None , truncation=None , **kwargs ):
        '''simple docstring'''
        preprocess_params , postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['padding'] = padding
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , image , question = None , **kwargs ):
        '''simple docstring'''
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {'image': image, 'question': question}
        else:
            # assume the input already is a dict (or list of dicts) with "image" and "question" keys
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def preprocess( self , inputs , padding=False , truncation=False ):
        '''simple docstring'''
        image = load_image(inputs['image'] )
        model_inputs = self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        '''simple docstring'''
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
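# A minimal usage sketch for this pipeline (the checkpoint name is an
# assumption, not something this file pins down):
#
#     from transformers import pipeline
#     vqa = pipeline('visual-question-answering', model='dandelin/vilt-b32-finetuned-vqa')
#     vqa(image='http://images.cocodataset.org/val2017/000000039769.jpg',
#         question='How many cats are there?', top_k=2)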
| 357 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def rename_keys( state_dict , encoder_only=False ) -> OrderedDict:
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith('head' ):
            key = 'segformer.encoder.' + key
        if key.startswith('backbone' ):
            key = key.replace('backbone' , 'segformer.encoder' )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed' ) + len('patch_embed' )]
            key = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(idx )-1}''' )
        if "norm" in key:
            key = key.replace('norm' , 'layer_norm' )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
            key = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(idx )-1}''' )
        if "layer_norm1" in key:
            key = key.replace('layer_norm1' , 'layer_norm_1' )
        if "layer_norm2" in key:
            key = key.replace('layer_norm2' , 'layer_norm_2' )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block' ) + len('block' )]
            key = key.replace(F'''block{idx}''' , F'''block.{int(idx )-1}''' )
        if "attn.q" in key:
            key = key.replace('attn.q' , 'attention.self.query' )
        if "attn.proj" in key:
            key = key.replace('attn.proj' , 'attention.output.dense' )
        if "attn" in key:
            key = key.replace('attn' , 'attention.self' )
        if "fc1" in key:
            key = key.replace('fc1' , 'dense1' )
        if "fc2" in key:
            key = key.replace('fc2' , 'dense2' )
        if "linear_pred" in key:
            key = key.replace('linear_pred' , 'classifier' )
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv' , 'linear_fuse' )
            key = key.replace('linear_fuse.bn' , 'batch_norm' )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c' ) + len('linear_c' )]
            key = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(idx )-1}''' )
        if key.startswith('head' ):
            key = key.replace('head' , 'classifier' )
        new_state_dict[key] = value
    return new_state_dict
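# For example, with encoder_only=False the renaming above maps a checkpoint key
# such as "backbone.patch_embed1.proj.weight" to
# "segformer.encoder.patch_embeddings.0.proj.weight".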
def read_in_k_v( state_dict , config ):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            kv_bias = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def lowerCamelCase ( a_ , a_ , a_ ) -> int:
lowerCAmelCase_ = SegformerConfig()
lowerCAmelCase_ = False
# set attributes based on model_name
lowerCAmelCase_ = 'huggingface/label-files'
if "segformer" in model_name:
lowerCAmelCase_ = model_name[len('segformer.' ) : len('segformer.' ) + 2]
if "ade" in model_name:
lowerCAmelCase_ = 150
lowerCAmelCase_ = 'ade20k-id2label.json'
lowerCAmelCase_ = (1, 150, 128, 128)
elif "city" in model_name:
lowerCAmelCase_ = 19
lowerCAmelCase_ = 'cityscapes-id2label.json'
lowerCAmelCase_ = (1, 19, 128, 128)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
lowerCAmelCase_ = True
lowerCAmelCase_ = model_name[4:6]
lowerCAmelCase_ = 1_000
lowerCAmelCase_ = 'imagenet-1k-id2label.json'
lowerCAmelCase_ = (1, 1_000)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
lowerCAmelCase_ = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset' ) , 'r' ) )
lowerCAmelCase_ = {int(a_ ): v for k, v in idalabel.items()}
lowerCAmelCase_ = idalabel
lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 256
elif size == "b2":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 4, 6, 3]
elif size == "b3":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 4, 18, 3]
elif size == "b4":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 8, 27, 3]
elif size == "b5":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
lowerCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=a_ , align=a_ , do_random_crop=a_ )
# prepare image
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=a_ , return_tensors='pt' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )
else:
lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )['state_dict']
# rename keys
lowerCAmelCase_ = rename_keys(a_ , encoder_only=a_ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(a_ , a_ )
# create HuggingFace model and load state dict
if encoder_only:
lowerCAmelCase_ = False
lowerCAmelCase_ = SegformerForImageClassification(a_ )
else:
lowerCAmelCase_ = SegformerForSemanticSegmentation(a_ )
model.load_state_dict(a_ )
model.eval()
# forward pass
lowerCAmelCase_ = model(a_ )
lowerCAmelCase_ = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
else:
lowerCAmelCase_ = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowerCamelCase_ = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 14 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase_ = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
lowerCamelCase_ = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
lowerCamelCase_ = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
lowerCamelCase_ = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
lowerCamelCase_ = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
lowerCamelCase_ = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
lowerCamelCase_ = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCamelCase_ = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCamelCase_ = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class a_ ( a_ ):
'''simple docstring'''
__a: int = VOCAB_FILES_NAMES
__a: Tuple = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__a: Tuple = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a: Optional[int] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class a_ ( a_ ):
'''simple docstring'''
__a: List[Any] = VOCAB_FILES_NAMES
__a: Tuple = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__a: Tuple = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a: Tuple = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase_ = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
lowerCamelCase_ = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
lowerCamelCase_ = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
        The passage titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
        The passage texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(a_ )
class a_ :
'''simple docstring'''
def __call__( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
elif titles is None or texts is None:
lowerCAmelCase_ = titles if texts is None else texts
return super().__call__(
lowercase_ , lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , )
lowerCAmelCase_ = titles if not isinstance(lowercase_ , lowercase_ ) else [titles]
lowerCAmelCase_ = texts if not isinstance(lowercase_ , lowercase_ ) else [texts]
lowerCAmelCase_ = len(lowercase_ )
lowerCAmelCase_ = questions if not isinstance(lowercase_ , lowercase_ ) else [questions] * n_passages
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
                f'''There should be as many titles as texts but got {len(lowercase_ )} titles and {len(lowercase_ )} texts.''' )
lowerCAmelCase_ = super().__call__(lowercase_ , lowercase_ , padding=lowercase_ , truncation=lowercase_ )['input_ids']
lowerCAmelCase_ = super().__call__(lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ )['input_ids']
lowerCAmelCase_ = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowercase_ , lowercase_ )
]
}
if return_attention_mask is not False:
lowerCAmelCase_ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCAmelCase_ = attention_mask
return self.pad(lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_tensors=lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = 1_6 , lowercase_ = 6_4 , lowercase_ = 4 , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
lowerCAmelCase_ = reader_input['input_ids']
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = reader_output[:3]
lowerCAmelCase_ = len(lowercase_ )
lowerCAmelCase_ = sorted(range(lowercase_ ) , reverse=lowercase_ , key=relevance_logits.__getitem__ )
lowerCAmelCase_ = []
for doc_id in sorted_docs:
lowerCAmelCase_ = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCAmelCase_ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCAmelCase_ = sequence_ids.index(self.pad_token_id )
else:
lowerCAmelCase_ = len(lowercase_ )
lowerCAmelCase_ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowercase_ , top_spans=lowercase_ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowercase_ , start_index=lowercase_ , end_index=lowercase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(lowercase_ ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[DPRSpanPrediction]:
'''simple docstring'''
lowerCAmelCase_ = []
for start_index, start_score in enumerate(lowercase_ ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCAmelCase_ = sorted(lowercase_ , key=lambda x : x[1] , reverse=lowercase_ )
lowerCAmelCase_ = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
lowerCAmelCase_ = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowercase_ ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a_ )
class a_ ( a_ , a_ ):
'''simple docstring'''
__a: Optional[Any] = VOCAB_FILES_NAMES
__a: str = READER_PRETRAINED_VOCAB_FILES_MAP
__a: Optional[int] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a: List[Any] = READER_PRETRAINED_INIT_CONFIGURATION
__a: str = ['''input_ids''', '''attention_mask''']
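# --- editor's sketch (not part of the original file) ---
# A minimal illustration of the span-scoring rule used in `_get_best_spans` above:
# every (start, end) pair within `max_answer_length` is scored as
# start_logit + end_logit and the highest-scoring span wins (the real method
# additionally filters overlapping spans). All names below are hypothetical.
def _demo_best_span(start_logits, end_logits, max_answer_length=3):
    scores = []
    for s, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[s : s + max_answer_length]):
            scores.append(((s, s + length), s_score + e_score))
    # pick the highest-scoring (start, end) pair
    return max(scores, key=lambda x: x[1])

assert _demo_best_span([0.1, 2.0, 0.3], [0.2, 0.1, 1.5]) == ((1, 2), 3.5)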
| 358 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class a_ ( a_ , a_ ):
'''simple docstring'''
__a: Optional[Any] = '''nat'''
__a: int = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , lowercase_=4 , lowercase_=3 , lowercase_=6_4 , lowercase_=[3, 4, 6, 5] , lowercase_=[2, 4, 8, 1_6] , lowercase_=7 , lowercase_=3.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=0.02 , lowercase_=1e-5 , lowercase_=0.0 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = depths
lowerCAmelCase_ = len(lowercase_ )
lowerCAmelCase_ = num_heads
lowerCAmelCase_ = kernel_size
lowerCAmelCase_ = mlp_ratio
lowerCAmelCase_ = qkv_bias
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = drop_path_rate
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase_ = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
lowerCAmelCase_ = layer_scale_init_value
lowerCAmelCase_ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase_ ) + 1 )]
lowerCAmelCase_ , lowerCAmelCase_ = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
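# editor's sanity check (not part of the original file): with the defaults above,
# embed_dim=64 and len(depths)=4, so hidden_size = int(64 * 2 ** 3) == 512.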
| 14 | 0 |
import math
lowerCamelCase_ = 1_0
lowerCamelCase_ = 7
lowerCamelCase_ = BALLS_PER_COLOUR * NUM_COLOURS
def lowerCamelCase ( a_ = 20 ) -> str:
lowerCAmelCase_ = math.comb(NUM_BALLS , a_ )
lowerCAmelCase_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , a_ )
lowerCAmelCase_ = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(2_0))
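# editor's note: with 7 colours x 10 balls and 20 picked, the expected number of
# distinct colours is 7 * (1 - C(60, 20) / C(70, 20)) ≈ 6.818741802, which the
# print above should reproduce.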
| 359 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCamelCase_ = """pytorch_model.bin"""
lowerCamelCase_ = """pytorch_model.bin.index.json"""
lowerCamelCase_ = """adapter_config.json"""
lowerCamelCase_ = """adapter_model.bin"""
lowerCamelCase_ = """adapter_model.safetensors"""
lowerCamelCase_ = """tf_model.h5"""
lowerCamelCase_ = """tf_model.h5.index.json"""
lowerCamelCase_ = """model.ckpt"""
lowerCamelCase_ = """flax_model.msgpack"""
lowerCamelCase_ = """flax_model.msgpack.index.json"""
lowerCamelCase_ = """model.safetensors"""
lowerCamelCase_ = """model.safetensors.index.json"""
lowerCamelCase_ = """config.json"""
lowerCamelCase_ = """preprocessor_config.json"""
lowerCamelCase_ = FEATURE_EXTRACTOR_NAME
lowerCamelCase_ = """generation_config.json"""
lowerCamelCase_ = """modelcard.json"""
lowerCamelCase_ = """▁"""
lowerCamelCase_ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCamelCase_ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCamelCase_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCamelCase_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def lowerCamelCase ( a_ ) -> Dict:
if version.parse(__version__ ) < version.parse(a_ ):
if "dev" in min_version:
lowerCAmelCase_ = (
'This example requires a source install from HuggingFace Transformers (see '
'`https://huggingface.co/docs/transformers/installation#install-from-source`),'
)
else:
lowerCAmelCase_ = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
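# usage sketch (editor's illustration; the guard above is upstream's
# `check_min_version`, renamed `lowerCamelCase` in this dump):
# lowerCamelCase("4.21.0")  # raises ImportError if the installed transformers is older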
| 14 | 0 |
def lowerCamelCase ( a_ ):
lowerCAmelCase_ = [0] * len(a_ )
lowerCAmelCase_ = []
lowerCAmelCase_ = [1] * len(a_ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(a_ ) ):
if indegree[i] == 0:
queue.append(a_ )
while queue:
lowerCAmelCase_ = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
lowerCAmelCase_ = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(a_ )
print(max(a_ ) )
# Adjacency list of Graph
lowerCamelCase_ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
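# editor's note: for the adjacency list above the longest chain is
# 0 -> 2 -> 5 -> 6 -> 7 (5 vertices), so the call should print 5.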
| 360 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCamelCase ( a_ ) -> List[str]:
if isinstance(a_ , torch.Tensor ):
return image
elif isinstance(a_ , PIL.Image.Image ):
lowerCAmelCase_ = [image]
lowerCAmelCase_ = [trans(img.convert('RGB' ) ) for img in image]
lowerCAmelCase_ = torch.stack(a_ )
return image
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCAmelCase_ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
def _lowercase ( self , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should be in [0.0, 1.0] but is {strength}''' )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = min(int(num_inference_steps * strength ) , lowercase_ )
lowerCAmelCase_ = max(num_inference_steps - init_timestep , 0 )
lowerCAmelCase_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
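# editor's worked example: with num_inference_steps=50 and strength=0.8,
# init_timestep = 40 and t_start = 10, so denoising runs only the last 40 steps.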
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Tuple:
'''simple docstring'''
if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}''' )
lowerCAmelCase_ = image.to(device=lowercase_ , dtype=lowercase_ )
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCAmelCase_ = init_latents.shape
lowerCAmelCase_ = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
# get latents
print('add noise to latents at timestep' , lowercase_ )
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase_ = init_latents
return latents
@torch.no_grad()
def __call__( self , lowercase_ = None , lowercase_ = 0.8 , lowercase_ = 1 , lowercase_ = None , lowercase_ = 0.0 , lowercase_ = 5_0 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowercase_ )
# 2. Preprocess image
lowerCAmelCase_ = preprocess(lowercase_ )
# 3. set timesteps
self.scheduler.set_timesteps(lowercase_ , device=self.device )
lowerCAmelCase_ , lowerCAmelCase_ = self.get_timesteps(lowercase_ , lowercase_ , self.device )
lowerCAmelCase_ = timesteps[:1].repeat(lowercase_ )
# 4. Prepare latent variables
lowerCAmelCase_ = self.prepare_latents(lowercase_ , lowercase_ , lowercase_ , self.unet.dtype , self.device , lowercase_ )
lowerCAmelCase_ = latents
# 5. Denoising loop
for t in self.progress_bar(lowercase_ ):
# 1. predict noise model_output
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCAmelCase_ = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_ , ).prev_sample
lowerCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase_ = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowercase_ )
| 14 | 0 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(4_2)
lowerCamelCase_ = """bert-base-cased"""
lowerCamelCase_ = """fp16"""
lowerCamelCase_ = """bf16"""
lowerCamelCase_ = [FPaa, BFaa]
@require_fsdp
@require_cuda
class a_ ( a_ ):
'''simple docstring'''
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
lowerCAmelCase_ = dict(
ACCELERATE_USE_FSDP='true' , MASTER_ADDR='localhost' , MASTER_PORT='10999' , RANK='0' , LOCAL_RANK='0' , WORLD_SIZE='1' , )
def _lowercase ( self ) -> str:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(lowercase_ ):
lowerCAmelCase_ = self.dist_env.copy()
lowerCAmelCase_ = f'''{i + 1}'''
lowerCAmelCase_ = strategy
with mockenv_context(**lowercase_ ):
lowerCAmelCase_ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
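# editor's note: torch's ShardingStrategy enum is 1-indexed, so the i-th name
# in FSDP_SHARDING_STRATEGY maps to ShardingStrategy(i + 1).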
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(lowercase_ ):
lowerCAmelCase_ = self.dist_env.copy()
lowerCAmelCase_ = prefetch_policy
with mockenv_context(**lowercase_ ):
lowerCAmelCase_ = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def _lowercase ( self ) -> int:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(lowercase_ ):
lowerCAmelCase_ = self.dist_env.copy()
lowerCAmelCase_ = state_dict_type
with mockenv_context(**lowercase_ ):
lowerCAmelCase_ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = AutoModel.from_pretrained(lowercase_ )
for policy in FSDP_AUTO_WRAP_POLICY:
lowerCAmelCase_ = self.dist_env.copy()
lowerCAmelCase_ = policy
if policy == "TRANSFORMER_BASED_WRAP":
lowerCAmelCase_ = 'BertLayer'
elif policy == "SIZE_BASED_WRAP":
lowerCAmelCase_ = '2000'
with mockenv_context(**lowercase_ ):
lowerCAmelCase_ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowercase_ )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
lowerCAmelCase_ = self.dist_env.copy()
lowerCAmelCase_ = 'TRANSFORMER_BASED_WRAP'
lowerCAmelCase_ = 'T5Layer'
with mockenv_context(**lowercase_ ):
lowerCAmelCase_ = FullyShardedDataParallelPlugin()
with self.assertRaises(lowercase_ ) as cm:
fsdp_plugin.set_auto_wrap_policy(lowercase_ )
self.assertTrue('Could not find the transformer layer class to wrap in the model.' in str(cm.exception ) )
lowerCAmelCase_ = self.dist_env.copy()
lowerCAmelCase_ = 'SIZE_BASED_WRAP'
lowerCAmelCase_ = '0'
with mockenv_context(**lowercase_ ):
lowerCAmelCase_ = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(lowercase_ )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def _lowercase ( self ) -> int:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
lowerCAmelCase_ = self.dist_env.copy()
lowerCAmelCase_ = mp_dtype
with mockenv_context(**lowercase_ ):
lowerCAmelCase_ = Accelerator()
if mp_dtype == "fp16":
lowerCAmelCase_ = torch.floataa
elif mp_dtype == "bf16":
lowerCAmelCase_ = torch.bfloataa
lowerCAmelCase_ = MixedPrecision(param_dtype=lowercase_ , reduce_dtype=lowercase_ , buffer_dtype=lowercase_ )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , lowercase_ )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , lowercase_ ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(lowercase_ )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
lowerCAmelCase_ = self.dist_env.copy()
lowerCAmelCase_ = str(lowercase_ ).lower()
with mockenv_context(**lowercase_ ):
lowerCAmelCase_ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=lowercase_ ) )
@require_fsdp
@require_multi_gpu
@slow
class a_ ( a_ ):
'''simple docstring'''
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
super().setUp()
lowerCAmelCase_ = 0.82
lowerCAmelCase_ = [
'fsdp_shard_grad_op_transformer_based_wrap',
'fsdp_full_shard_transformer_based_wrap',
]
lowerCAmelCase_ = {
'multi_gpu_fp16': 3_2_0_0,
'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2_0_0_0,
'fsdp_full_shard_transformer_based_wrap_fp16': 1_9_0_0,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
lowerCAmelCase_ = 1_6_0
lowerCAmelCase_ = 1_6_0
lowerCAmelCase_ = inspect.getfile(accelerate.test_utils )
lowerCAmelCase_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps'] )
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = os.path.join(self.test_scripts_folder , 'test_performance.py' )
lowerCAmelCase_ = ['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp']
for config in self.performance_configs:
lowerCAmelCase_ = cmd.copy()
for i, strategy in enumerate(lowercase_ ):
if strategy.lower() in config:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append('--mixed_precision=no' )
else:
cmd_config.append('--mixed_precision=fp16' )
if "cpu_offload" in config:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = os.path.join(self.test_scripts_folder , 'test_checkpointing.py' )
lowerCAmelCase_ = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
'--use_fsdp',
'--mixed_precision=fp16',
'--fsdp_transformer_layer_cls_to_wrap=BertLayer',
]
for i, strategy in enumerate(lowercase_ ):
lowerCAmelCase_ = cmd.copy()
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
lowerCAmelCase_ = len(lowercase_ )
for state_dict_type in FSDP_STATE_DICT_TYPE:
lowerCAmelCase_ = cmd_config[:state_dict_config_index]
cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
'--partial_train_epoch=1',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
lowerCAmelCase_ = cmd_config[:-1]
lowerCAmelCase_ = os.path.join(self.tmpdir , 'epoch_0' )
cmd_config.extend(
[
f'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = os.path.join(self.test_scripts_folder , 'test_peak_memory_usage.py' )
lowerCAmelCase_ = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
lowerCAmelCase_ = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['--mixed_precision=fp16'] )
else:
cmd_config.extend(['--mixed_precision=no'] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['--use_fsdp'] )
for i, strategy in enumerate(lowercase_ ):
if strategy.lower() in spec:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
f'''--n_train={self.n_train}''',
f'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowercase_ , env=os.environ.copy() )
| 361 |
def lowerCamelCase ( a_ ) -> "list[int]":
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
lowerCAmelCase_ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
lowerCAmelCase_ = 1
if upper_limit > 0:
lowerCAmelCase_ = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(a_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
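# editor's sanity check: catalan_numbers(5) should return [1, 1, 2, 5, 14, 42].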
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
lowerCamelCase_ = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 14 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"""microsoft/swinv2-tiny-patch4-window8-256""": (
"""https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"""
),
}
class a_ ( a_ ):
'''simple docstring'''
__a: List[str] = '''swinv2'''
__a: Any = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , lowercase_=2_2_4 , lowercase_=4 , lowercase_=3 , lowercase_=9_6 , lowercase_=[2, 2, 6, 2] , lowercase_=[3, 6, 1_2, 2_4] , lowercase_=7 , lowercase_=4.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=False , lowercase_=0.02 , lowercase_=1e-5 , lowercase_=3_2 , **lowercase_ , ) -> List[Any]:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ : int = image_size
lowerCAmelCase_ : Optional[Any] = patch_size
lowerCAmelCase_ : str = num_channels
lowerCAmelCase_ : Optional[Any] = embed_dim
lowerCAmelCase_ : str = depths
lowerCAmelCase_ : Optional[int] = len(lowercase_ )
lowerCAmelCase_ : Tuple = num_heads
lowerCAmelCase_ : Optional[Any] = window_size
lowerCAmelCase_ : Any = mlp_ratio
lowerCAmelCase_ : Optional[Any] = qkv_bias
lowerCAmelCase_ : int = hidden_dropout_prob
lowerCAmelCase_ : Dict = attention_probs_dropout_prob
lowerCAmelCase_ : Tuple = drop_path_rate
lowerCAmelCase_ : Optional[int] = hidden_act
lowerCAmelCase_ : Optional[int] = use_absolute_embeddings
lowerCAmelCase_ : Dict = layer_norm_eps
lowerCAmelCase_ : Dict = initializer_range
lowerCAmelCase_ : int = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase_ : Optional[Any] = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
lowerCAmelCase_ : str = (0, 0, 0, 0)
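# editor's note: this final tuple corresponds to `pretrained_window_sizes` in the
# upstream Swinv2 config (the per-stage window size used during pre-training).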
| 362 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(a_ )
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , *lowercase_ , **lowercase_ ) -> Any:
'''simple docstring'''
super().__init__(*lowercase_ , **lowercase_ )
self.check_model_type(lowercase_ )
def _lowercase ( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = {}, {}
if padding is not None:
lowerCAmelCase_ = padding
if truncation is not None:
lowerCAmelCase_ = truncation
if top_k is not None:
lowerCAmelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowercase_ , lowercase_ = None , **lowercase_ ) -> int:
'''simple docstring'''
if isinstance(lowercase_ , (Image.Image, str) ) and isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase_ = {'image': image, 'question': question}
else:
lowerCAmelCase_ = image
lowerCAmelCase_ = super().__call__(lowercase_ , **lowercase_ )
return results
def _lowercase ( self , lowercase_ , lowercase_=False , lowercase_=False ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = load_image(inputs['image'] )
lowerCAmelCase_ = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=lowercase_ , truncation=lowercase_ )
lowerCAmelCase_ = self.image_processor(images=lowercase_ , return_tensors=self.framework )
model_inputs.update(lowercase_ )
return model_inputs
def _lowercase ( self , lowercase_ ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = self.model(**lowercase_ )
return model_outputs
def _lowercase ( self , lowercase_ , lowercase_=5 ) -> Any:
'''simple docstring'''
if top_k > self.model.config.num_labels:
lowerCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
lowerCAmelCase_ = model_outputs.logits.sigmoid()[0]
lowerCAmelCase_ , lowerCAmelCase_ = probs.topk(lowercase_ )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
lowerCAmelCase_ = scores.tolist()
lowerCAmelCase_ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
| 14 | 0 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase_ : List[Any] = logging.get_logger(__name__)
logging.set_verbosity_info()
def lowerCamelCase ( a_ , a_ ) -> int:
if "xprophetnet" in prophetnet_checkpoint_path:
lowerCAmelCase_ = XLMProphetNetForConditionalGenerationOld.from_pretrained(a_ )
lowerCAmelCase_ , lowerCAmelCase_ = XLMProphetNetForConditionalGeneration.from_pretrained(
a_ , output_loading_info=a_ )
else:
lowerCAmelCase_ = ProphetNetForConditionalGenerationOld.from_pretrained(a_ )
lowerCAmelCase_ , lowerCAmelCase_ = ProphetNetForConditionalGeneration.from_pretrained(
a_ , output_loading_info=a_ )
lowerCAmelCase_ = ['key_proj', 'value_proj', 'query_proj']
lowerCAmelCase_ = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
lowerCAmelCase_ = key.split('.' )
if attributes[0] == "lm_head":
lowerCAmelCase_ = prophet
lowerCAmelCase_ = prophet_old
else:
lowerCAmelCase_ = prophet.prophetnet
lowerCAmelCase_ = prophet_old.model
lowerCAmelCase_ = False
for attribute in attributes:
if attribute in mapping:
lowerCAmelCase_ = mapping[attribute]
if not hasattr(a_ , a_ ) and len(a_ ) > 0:
lowerCAmelCase_ = attribute
elif hasattr(a_ , a_ ):
lowerCAmelCase_ = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowerCAmelCase_ = old_model.weight
logger.info(F'''{attribute} is initialized.''' )
lowerCAmelCase_ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowerCAmelCase_ = old_model.bias
logger.info(F'''{attribute} is initialized''' )
lowerCAmelCase_ = True
break
elif attribute in special_keys and hasattr(a_ , 'in_proj_weight' ):
lowerCAmelCase_ = old_model.in_proj_weight.shape[0] // 3
lowerCAmelCase_ = getattr(a_ , a_ )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowerCAmelCase_ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowerCAmelCase_ = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowerCAmelCase_ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowerCAmelCase_ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowerCAmelCase_ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowerCAmelCase_ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
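# editor's note: the old model packs q/k/v row-wise into a single in_proj_weight,
# hence the three consecutive embed_dim-sized slices above.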
lowerCAmelCase_ = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowerCAmelCase_ = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowerCAmelCase_ = True
break
if attribute.isdigit():
lowerCAmelCase_ = model[int(a_ )]
lowerCAmelCase_ = old_model[int(a_ )]
else:
lowerCAmelCase_ = getattr(a_ , a_ )
if old_attribute == "":
lowerCAmelCase_ = old_model
else:
if not hasattr(a_ , a_ ):
raise ValueError(F'''{old_model} does not have {old_attribute}''' )
lowerCAmelCase_ = getattr(a_ , a_ )
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Any = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 363 |
def lowerCamelCase ( a_ ) -> bool:
lowerCAmelCase_ = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
lowerCAmelCase_ = set()
return any(
node not in visited and depth_first_search(a_ , a_ , a_ , a_ )
for node in graph )
def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> bool:
visited.add(a_ )
rec_stk.add(a_ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a_ , a_ , a_ , a_ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(a_ )
return False
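# editor's sketch: with these helpers, a graph such as {0: [1], 1: [2], 2: [0]}
# contains a back edge (2 -> 0) and is reported cyclic, while
# {0: [1], 1: [2], 2: []} is not.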
if __name__ == "__main__":
from doctest import testmod
testmod()
| 14 | 0 |
def lowerCamelCase ( a_ , a_ ) -> int:
if len(a_ ) != len(a_ ):
raise ValueError('String lengths must match!' )
lowerCAmelCase_ = 0
for chara, chara in zip(a_ , a_ ):
if chara != chara:
count += 1
return count
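# editor's sanity check: the distance between 'karolin' and 'kathrin' is 3.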
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( a_ , a_ , a_ , unittest.TestCase ):
'''simple docstring'''
__a: int = StableDiffusionInpaintPipeline
__a: int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__a: Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__a: int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__a: List[str] = frozenset([] )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowercase_ , )
lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=lowercase_ )
torch.manual_seed(0 )
lowerCAmelCase_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
lowerCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
lowerCAmelCase_ = CLIPTextModel(lowercase_ )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _lowercase ( self , lowercase_ , lowercase_=0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ = Image.fromarray(np.uinta(lowercase_ ) ).convert('RGB' ).resize((6_4, 6_4) )
lowerCAmelCase_ = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((6_4, 6_4) )
if str(lowercase_ ).startswith('mps' ):
lowerCAmelCase_ = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = StableDiffusionInpaintPipeline(**lowercase_ )
lowerCAmelCase_ = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase_ = self.get_dummy_inputs(lowercase_ )
lowerCAmelCase_ = sd_pipe(**lowercase_ ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCAmelCase_ = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(lowercase_ , safety_checker=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , )
lowerCAmelCase_ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(
lowercase_ , torch_dtype=torch.floataa , safety_checker=lowercase_ , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , )
lowerCAmelCase_ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase_ = PNDMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' )
lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(
lowercase_ , safety_checker=lowercase_ , scheduler=lowercase_ , torch_dtype=torch.floataa , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
| 14 | 0 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class a_ ( a_ ):
'''simple docstring'''
__a: str = ['''vqvae''']
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ , mel=lowercase_ , vqvae=lowercase_ )
def _lowercase ( self ) -> int:
'''simple docstring'''
return 5_0 if isinstance(self.scheduler , lowercase_ ) else 1_0_0_0
@torch.no_grad()
def __call__( self , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
lowerCAmelCase_ = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowercase_ )
lowerCAmelCase_ = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowerCAmelCase_ = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowerCAmelCase_ = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=lowercase_ , device=self.device , )
lowerCAmelCase_ = noise
lowerCAmelCase_ = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowercase_ , lowercase_ )
lowerCAmelCase_ = self.mel.audio_slice_to_image(lowercase_ )
lowerCAmelCase_ = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
lowerCAmelCase_ = (input_image / 2_5_5) * 2 - 1
lowerCAmelCase_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowerCAmelCase_ = self.vqvae.encode(torch.unsqueeze(lowercase_ , 0 ) ).latent_dist.sample(
generator=lowercase_ )[0]
lowerCAmelCase_ = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , self.scheduler.timesteps[start_step - 1] )
lowerCAmelCase_ = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowerCAmelCase_ = int(mask_start_secs * pixels_per_second )
lowerCAmelCase_ = int(mask_end_secs * pixels_per_second )
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , lowercase_ ):
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ , lowercase_ )['sample']
else:
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample']
if isinstance(self.scheduler , lowercase_ ):
lowerCAmelCase_ = self.scheduler.step(
model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , eta=lowercase_ , generator=lowercase_ , )['prev_sample']
else:
lowerCAmelCase_ = self.scheduler.step(
model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , generator=lowercase_ , )['prev_sample']
if mask is not None:
if mask_start > 0:
lowerCAmelCase_ = mask[:, step, :, :mask_start]
if mask_end > 0:
lowerCAmelCase_ = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
lowerCAmelCase_ = 1 / self.vqvae.config.scaling_factor * images
lowerCAmelCase_ = self.vqvae.decode(lowercase_ )['sample']
lowerCAmelCase_ = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowerCAmelCase_ = (images * 2_5_5).round().astype('uint8' )
lowerCAmelCase_ = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_ , mode='RGB' ).convert('L' ) for _ in images) )
lowerCAmelCase_ = [self.mel.image_to_audio(lowercase_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowercase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase_ ) )
@torch.no_grad()
def _lowercase ( self , lowercase_ , lowercase_ = 5_0 ) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , lowercase_ )
self.scheduler.set_timesteps(lowercase_ )
lowerCAmelCase_ = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
lowerCAmelCase_ = (sample / 2_5_5) * 2 - 1
lowerCAmelCase_ = torch.Tensor(lowercase_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
lowerCAmelCase_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowerCAmelCase_ = self.scheduler.alphas_cumprod[t]
lowerCAmelCase_ = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowerCAmelCase_ = 1 - alpha_prod_t
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample']
lowerCAmelCase_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowerCAmelCase_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowerCAmelCase_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _lowercase ( lowercase_ , lowercase_ , lowercase_ ) -> torch.Tensor:
'''simple docstring'''
lowerCAmelCase_ = acos(torch.dot(torch.flatten(lowercase_ ) , torch.flatten(lowercase_ ) ) / torch.norm(lowercase_ ) / torch.norm(lowercase_ ) )
return sin((1 - alpha) * theta ) * xa / sin(lowercase_ ) + sin(alpha * theta ) * xa / sin(lowercase_ )
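# editor's note: the static method above is spherical linear interpolation (slerp)
# between two flattened tensors, with mixing weight `alpha`.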
| 365 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class a_ :
'''simple docstring'''
__a: int
__a: int
class a_ :
'''simple docstring'''
def __init__( self , lowercase_ ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = [[] for _ in range(lowercase_ )]
lowerCAmelCase_ = size
def __getitem__( self , lowercase_ ) -> Iterator[Edge]:
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
return self._size
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(lowercase_ , lowercase_ ) )
def _lowercase ( self , lowercase_ , lowercase_ ) -> int | None:
'''simple docstring'''
lowerCAmelCase_ = deque([start_vertex] )
lowerCAmelCase_ = [None] * self.size
lowerCAmelCase_ = 0
while queue:
lowerCAmelCase_ = queue.popleft()
lowerCAmelCase_ = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
lowerCAmelCase_ = current_distance + edge.weight
lowerCAmelCase_ = distances[edge.destination_vertex]
if (
isinstance(lowercase_ , lowercase_ )
and new_distance >= dest_vertex_distance
):
continue
lowerCAmelCase_ = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
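# editor's note: this is 0-1 BFS: weight-0 edges go to the front of the deque and
# weight-1 edges to the back, yielding shortest paths in O(V + E) time.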
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# See all MVP models at https://huggingface.co/models?filter=mvp
lowerCamelCase_ = {
"""vocab_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json""",
},
"""added_tokens.json""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json""",
},
"""merges_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json""",
},
}
lowerCamelCase_ = {
"""RUCAIBox/mvp""": 1_0_2_4,
}
class a_ ( a_ ):
'''simple docstring'''
__a: Dict = VOCAB_FILES_NAMES
__a: Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__a: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a: List[str] = ['''input_ids''', '''attention_mask''']
__a: List[str] = MvpTokenizer
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="replace" , lowercase_="<s>" , lowercase_="</s>" , lowercase_="</s>" , lowercase_="<s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_="<mask>" , lowercase_=False , lowercase_=True , **lowercase_ , ) -> int:
'''simple docstring'''
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , errors=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ , **lowercase_ , )
lowerCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase_ ) != add_prefix_space:
lowerCAmelCase_ = getattr(lowercase_ , pre_tok_state.pop('type' ) )
lowerCAmelCase_ = add_prefix_space
lowerCAmelCase_ = pre_tok_class(**lowercase_ )
lowerCAmelCase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowerCAmelCase_ = 'post_processor'
lowerCAmelCase_ = getattr(self.backend_tokenizer , lowercase_ , lowercase_ )
if tokenizer_component_instance:
lowerCAmelCase_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase_ = tuple(state['sep'] )
if "cls" in state:
lowerCAmelCase_ = tuple(state['cls'] )
lowerCAmelCase_ = False
if state.get('add_prefix_space' , lowercase_ ) != add_prefix_space:
lowerCAmelCase_ = add_prefix_space
lowerCAmelCase_ = True
if state.get('trim_offsets' , lowercase_ ) != trim_offsets:
lowerCAmelCase_ = trim_offsets
lowerCAmelCase_ = True
if changes_to_apply:
lowerCAmelCase_ = getattr(lowercase_ , state.pop('type' ) )
lowerCAmelCase_ = component_class(**lowercase_ )
setattr(self.backend_tokenizer , lowercase_ , lowercase_ )
@property
def _lowercase ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowercase ( self , lowercase_ ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else value
lowerCAmelCase_ = value
def _lowercase ( self , *lowercase_ , **lowercase_ ) -> BatchEncoding:
'''simple docstring'''
lowerCAmelCase_ = kwargs.get('is_split_into_words' , lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*lowercase_ , **lowercase_ )
def _lowercase ( self , *lowercase_ , **lowercase_ ) -> BatchEncoding:
'''simple docstring'''
lowerCAmelCase_ = kwargs.get('is_split_into_words' , lowercase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'to use it with pretokenized inputs.' )
return super()._encode_plus(*lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ = None ) -> Tuple[str]:
'''simple docstring'''
lowerCAmelCase_ = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_=None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ = [self.sep_token_id]
lowerCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
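# Illustrative note (editor's addition, not part of the original file): with
# hypothetical ids bos=0 and eos=2, the two builders above produce
#   single sequence: [0] + A + [2]
#   sequence pair:   [0] + A + [2] + [2] + B + [2]
# i.e. the BART-style layout in which eos doubles as the separator, and the
# token-type ids are all zeros in both cases.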
| 366 |
from __future__ import annotations
lowerCamelCase_ = 1_0
def lowerCamelCase ( a_ ) -> list[int]:
lowerCAmelCase_ = 1
lowerCAmelCase_ = max(a_ )
while placement <= max_digit:
# declare and initialize empty buckets
lowerCAmelCase_ = [[] for _ in range(a_ )]
# split list_of_ints between the buckets
for i in list_of_ints:
lowerCAmelCase_ = int((i / placement) % RADIX )
buckets[tmp].append(a_ )
        # put each bucket's contents back into list_of_ints
lowerCAmelCase_ = 0
for b in range(a_ ):
for i in buckets[b]:
lowerCAmelCase_ = i
a += 1
        # move to the next digit place
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
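# Expected behaviour of the radix sort above (editor's addition; the algorithm
# assumes non-negative integers):
#   [170, 45, 75, 90, 802, 24, 2, 66]  ->  [2, 24, 45, 66, 75, 90, 170, 802]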
| 14 | 0 |
import functools
def lowerCamelCase ( a_ , a_ ) -> int:
# Validation
if not isinstance(a_ , a_ ) or not all(isinstance(a_ , a_ ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(a_ ) != 3 or not all(isinstance(a_ , a_ ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(a_ ) == 0:
return 0
if min(a_ ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(a_ ) >= 366:
raise ValueError('All days elements should be less than 366' )
lowerCAmelCase_ = set(a_ )
@functools.cache
def dynamic_programming(a_ ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
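# Worked example for the ticket-cost DP above (editor's addition): with
# days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15] for 1-, 7- and 30-day
# passes, the minimum spend is 11: a 7-day pass covering days 1-7 plus
# 1-day passes for days 8 and 20.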
| 367 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def lowerCamelCase ( a_ , a_ , a_ , a_ , a_ ) -> List[Any]:
# load base model
lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
lowerCAmelCase_ = load_file(a_ )
lowerCAmelCase_ = []
# directly update weight in diffusers model
for key in state_dict:
        # it can help to print the key here; it usually looks like
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # alpha is provided up front (via --alpha), so '.alpha' entries can be skipped
if ".alpha" in key or key in visited:
continue
if "text" in key:
lowerCAmelCase_ = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' )
lowerCAmelCase_ = pipeline.text_encoder
else:
lowerCAmelCase_ = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' )
lowerCAmelCase_ = pipeline.unet
# find the target layer
lowerCAmelCase_ = layer_infos.pop(0 )
while len(a_ ) > -1:
try:
lowerCAmelCase_ = curr_layer.__getattr__(a_ )
if len(a_ ) > 0:
lowerCAmelCase_ = layer_infos.pop(0 )
elif len(a_ ) == 0:
break
except Exception:
if len(a_ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
lowerCAmelCase_ = layer_infos.pop(0 )
lowerCAmelCase_ = []
if "lora_down" in key:
pair_keys.append(key.replace('lora_down' , 'lora_up' ) )
pair_keys.append(a_ )
else:
pair_keys.append(a_ )
pair_keys.append(key.replace('lora_up' , 'lora_down' ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
lowerCAmelCase_ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
lowerCAmelCase_ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(a_ , a_ ).unsqueeze(2 ).unsqueeze(3 )
else:
lowerCAmelCase_ = state_dict[pair_keys[0]].to(torch.floataa )
lowerCAmelCase_ = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(a_ , a_ )
# update visited list
for item in pair_keys:
visited.append(a_ )
return pipeline
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = args.base_model_path
lowerCamelCase_ = args.checkpoint_path
lowerCamelCase_ = args.dump_path
lowerCamelCase_ = args.lora_prefix_unet
lowerCamelCase_ = args.lora_prefix_text_encoder
lowerCamelCase_ = args.alpha
lowerCamelCase_ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
lowerCamelCase_ = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
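# Example invocation (editor's addition; the script name and all paths below
# are placeholders, not taken from the original file):
#   python convert_lora_safetensor_to_diffusers.py \
#       --base_model_path runwayml/stable-diffusion-v1-5 \
#       --checkpoint_path ./lora.safetensors \
#       --dump_path ./merged-model --alpha 0.75 --device cpu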
| 14 | 0 |
from random import shuffle
import tensorflow as tf
from numpy import array
def lowerCamelCase ( a_ , a_ ) -> int:
lowerCAmelCase_ = int(a_ )
assert noofclusters < len(a_ )
# Find out the dimensionality
lowerCAmelCase_ = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowerCAmelCase_ = list(range(len(a_ ) ) )
shuffle(a_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowerCAmelCase_ = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowerCAmelCase_ = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowerCAmelCase_ = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(a_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowerCAmelCase_ = tf.placeholder('float64' , [dim] )
lowerCAmelCase_ = []
for centroid in centroids:
cent_assigns.append(tf.assign(a_ , a_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowerCAmelCase_ = [tf.Variable(0 ) for i in range(len(a_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowerCAmelCase_ = tf.placeholder('int32' )
lowerCAmelCase_ = []
for assignment in assignments:
cluster_assigns.append(tf.assign(a_ , a_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowerCAmelCase_ = tf.placeholder('float' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowerCAmelCase_ = tf.reduce_mean(a_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowerCAmelCase_ = tf.placeholder('float' , [dim] )
lowerCAmelCase_ = tf.placeholder('float' , [dim] )
lowerCAmelCase_ = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(a_ , a_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowerCAmelCase_ = tf.placeholder('float' , [noofclusters] )
lowerCAmelCase_ = tf.argmin(a_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowerCAmelCase_ = tf.initialize_all_variables()
# Initialize all variables
sess.run(a_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowerCAmelCase_ = 100
for _ in range(a_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(a_ ) ):
lowerCAmelCase_ = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
lowerCAmelCase_ = [
sess.run(a_ , feed_dict={va: vect, va: sess.run(a_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowerCAmelCase_ = sess.run(
a_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(a_ ):
# Collect all the vectors assigned to this cluster
lowerCAmelCase_ = [
vectors[i]
for i in range(len(a_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowerCAmelCase_ = sess.run(
a_ , feed_dict={mean_input: array(a_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowerCAmelCase_ = sess.run(a_ )
lowerCAmelCase_ = sess.run(a_ )
return centroids, assignments
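# Intended usage sketch (editor's addition; assumes a TF 1.x environment,
# since the code above relies on tf.Session, tf.placeholder and tf.sub):
#   points = np.random.rand(50, 2).astype('float64')
#   centroids, assignments = kmeans(points, noofclusters=3)
# where `kmeans` stands for the clustering function defined above.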
| 368 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowerCamelCase ( a_ ) -> Any:
lowerCAmelCase_ = tmp_path / 'file.csv'
lowerCAmelCase_ = textwrap.dedent(
        '\n header1,header2\n 1,2\n 10,20\n ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> List[Any]:
lowerCAmelCase_ = tmp_path / 'malformed_file.csv'
lowerCAmelCase_ = textwrap.dedent(
        '\n header1,header2\n 1,2\n 10,20,\n ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ , a_ ) -> List[str]:
lowerCAmelCase_ = tmp_path / 'csv_with_image.csv'
lowerCAmelCase_ = textwrap.dedent(
F'''\
image
{image_file}
''' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> int:
lowerCAmelCase_ = tmp_path / 'csv_with_label.csv'
lowerCAmelCase_ = textwrap.dedent(
        '\n label\n good\n bad\n good\n ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> Union[str, Any]:
lowerCAmelCase_ = tmp_path / 'csv_with_int_list.csv'
lowerCAmelCase_ = textwrap.dedent(
        '\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
def lowerCamelCase ( a_ , a_ , a_ ) -> Optional[Any]:
lowerCAmelCase_ = Csv()
lowerCAmelCase_ = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(a_ , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(a_ ) in record.message
for record in caplog.records )
@require_pil
def lowerCamelCase ( a_ ) -> Optional[Any]:
with open(a_ , encoding='utf-8' ) as f:
lowerCAmelCase_ = f.read().splitlines()[1]
lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_image]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
lowerCAmelCase_ = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def lowerCamelCase ( a_ ) -> int:
with open(a_ , encoding='utf-8' ) as f:
lowerCAmelCase_ = f.read().splitlines()[1:]
lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_label]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
lowerCAmelCase_ = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad'] ).str2int(label ) for label in labels]
def lowerCamelCase ( a_ ) -> Union[str, Any]:
    lowerCAmelCase_ = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda a_ : [int(i ) for i in a_.split()]} )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_int_list]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
lowerCAmelCase_ = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 14 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def lowerCamelCase ( a_ ) -> Union[str, Any]:
lowerCAmelCase_ = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
lowerCAmelCase_ = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
lowerCAmelCase_ = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase_ = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCAmelCase_ = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(a_ )-1}''' )
if "norm" in key:
lowerCAmelCase_ = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase_ = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
lowerCAmelCase_ = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(a_ )-1}''' )
if "layer_norm1" in key:
lowerCAmelCase_ = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCAmelCase_ = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase_ = key[key.find('block' ) + len('block' )]
lowerCAmelCase_ = key.replace(F'''block{idx}''' , F'''block.{int(a_ )-1}''' )
if "attn.q" in key:
lowerCAmelCase_ = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCAmelCase_ = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCAmelCase_ = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCAmelCase_ = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCAmelCase_ = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCAmelCase_ = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCAmelCase_ = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCAmelCase_ = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase_ = key[key.find('linear_c' ) + len('linear_c' )]
lowerCAmelCase_ = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(a_ )-1}''' )
if "bot_conv" in key:
lowerCAmelCase_ = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
lowerCAmelCase_ = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
lowerCAmelCase_ = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
lowerCAmelCase_ = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
lowerCAmelCase_ = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
lowerCAmelCase_ = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
lowerCAmelCase_ = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
lowerCAmelCase_ = key.replace('module.last_layer_depth' , 'head.head' )
lowerCAmelCase_ = value
return new_state_dict
def lowerCamelCase ( a_ , a_ ) -> str:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase_ = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
lowerCAmelCase_ = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase_ = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase_ = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase_ = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase_ = kv_bias[config.hidden_sizes[i] :]
def lowerCamelCase ( ) -> Any:
lowerCAmelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase_ = Image.open(requests.get(a_ , stream=a_ ).raw )
return image
@torch.no_grad()
def lowerCamelCase ( a_ , a_ , a_=False , a_=None ) -> Optional[int]:
lowerCAmelCase_ = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase_ = GLPNImageProcessor()
# prepare image
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=a_ , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )
# rename keys
lowerCAmelCase_ = rename_keys(a_ )
# key and value matrices need special treatment
read_in_k_v(a_ , a_ )
# create HuggingFace model and load state dict
lowerCAmelCase_ = GLPNForDepthEstimation(a_ )
model.load_state_dict(a_ )
model.eval()
# forward pass
lowerCAmelCase_ = model(a_ )
lowerCAmelCase_ = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase_ = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
lowerCAmelCase_ = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
lowerCAmelCase_ = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , a_ , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(a_ , a_ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=a_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(a_ , a_ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=a_ , )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
lowerCamelCase_ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name) | 369 |
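# Example invocation of the GLPN conversion script above (editor's addition;
# the script name and checkpoint path are placeholders):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti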
from maths.prime_factors import prime_factors
def lowerCamelCase ( a_ ) -> int:
if not isinstance(a_ , a_ ):
lowerCAmelCase_ = F'''Input value of [number={number}] must be an integer'''
raise TypeError(a_ )
if number < 1:
raise ValueError('Input must be a positive integer' )
return -1 if len(prime_factors(a_ ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
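# Worked values (editor's addition): because prime factors are counted with
# multiplicity and square-free inputs are not special-cased, this computes the
# Liouville function rather than the Moebius function; e.g. for 4 it returns 1
# (prime_factors(4) == [2, 2], an even count) whereas mu(4) == 0.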
| 14 | 0 |
lowerCamelCase_ = 6_5_5_2_1
def lowerCamelCase ( a_ ) -> int:
lowerCAmelCase_ = 1
lowerCAmelCase_ = 0
for plain_chr in plain_text:
lowerCAmelCase_ = (a + ord(a_ )) % MOD_ADLER
lowerCAmelCase_ = (b + a) % MOD_ADLER
return (b << 16) | a
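# Known-answer value (editor's addition): the Adler-32 checksum of 'Wikipedia'
# is 0x11E60398, i.e. a = 920 and b = 4582 after the loop above.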
| 370 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCamelCase ( a_ , a_ ) -> Tuple:
lowerCAmelCase_ = XCLIPTextConfig()
# derive patch size from model name
lowerCAmelCase_ = model_name.find('patch' )
lowerCAmelCase_ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
lowerCAmelCase_ = XCLIPVisionConfig(patch_size=a_ , num_frames=a_ )
if "large" in model_name:
lowerCAmelCase_ = 768
lowerCAmelCase_ = 3_072
lowerCAmelCase_ = 12
lowerCAmelCase_ = 1_024
lowerCAmelCase_ = 4_096
lowerCAmelCase_ = 16
lowerCAmelCase_ = 24
lowerCAmelCase_ = 768
lowerCAmelCase_ = 3_072
if model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ = 336
lowerCAmelCase_ = XCLIPConfig.from_text_vision_configs(a_ , a_ )
if "large" in model_name:
lowerCAmelCase_ = 768
return config
def lowerCamelCase ( a_ ) -> List[str]:
# text encoder
if name == "token_embedding.weight":
lowerCAmelCase_ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
lowerCAmelCase_ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
lowerCAmelCase_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
lowerCAmelCase_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
lowerCAmelCase_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
lowerCAmelCase_ = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
lowerCAmelCase_ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
lowerCAmelCase_ = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
lowerCAmelCase_ = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
lowerCAmelCase_ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
lowerCAmelCase_ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
lowerCAmelCase_ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
lowerCAmelCase_ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
lowerCAmelCase_ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
lowerCAmelCase_ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
lowerCAmelCase_ = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
lowerCAmelCase_ = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
lowerCAmelCase_ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
lowerCAmelCase_ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
lowerCAmelCase_ = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
lowerCAmelCase_ = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
lowerCAmelCase_ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def lowerCamelCase ( a_ , a_ ) -> Dict:
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ = orig_state_dict.pop(a_ )
if "attn.in_proj" in key:
lowerCAmelCase_ = key.split('.' )
if key.startswith('visual' ):
lowerCAmelCase_ = key_split[3]
lowerCAmelCase_ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[
:dim
]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[
-dim:
]
else:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
elif key.startswith('mit' ):
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.vision_config.mit_hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[dim : dim * 2, :]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[dim : dim * 2]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
                lowerCAmelCase_ = val[dim : dim * 2, :]
                lowerCAmelCase_ = val[-dim:, :]
            else:
                lowerCAmelCase_ = val[:dim]
                lowerCAmelCase_ = val[dim : dim * 2]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = rename_key(a_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
lowerCAmelCase_ = val.T
lowerCAmelCase_ = val
return orig_state_dict
def lowerCamelCase ( a_ ) -> List[str]:
if num_frames == 8:
lowerCAmelCase_ = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
lowerCAmelCase_ = 'eating_spaghetti.npy'
elif num_frames == 32:
lowerCAmelCase_ = 'eating_spaghetti_32_frames.npy'
lowerCAmelCase_ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=a_ , repo_type='dataset' , )
lowerCAmelCase_ = np.load(a_ )
return list(a_ )
def lowerCamelCase ( a_ , a_=None , a_=False ) -> List[Any]:
lowerCAmelCase_ = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
lowerCAmelCase_ = model_to_url[model_name]
lowerCAmelCase_ = 8
if "16-frames" in model_name:
lowerCAmelCase_ = 16
elif "shot" in model_name:
lowerCAmelCase_ = 32
lowerCAmelCase_ = get_xclip_config(a_ , a_ )
lowerCAmelCase_ = XCLIPModel(a_ )
model.eval()
if "drive" in checkpoint_url:
lowerCAmelCase_ = 'pytorch_model.bin'
gdown.cached_download(a_ , a_ , quiet=a_ )
lowerCAmelCase_ = torch.load(a_ , map_location='cpu' )['model']
else:
lowerCAmelCase_ = torch.hub.load_state_dict_from_url(a_ )['model']
lowerCAmelCase_ = convert_state_dict(a_ , a_ )
lowerCAmelCase_ = XCLIPModel(a_ )
lowerCAmelCase_ , lowerCAmelCase_ = model.load_state_dict(a_ , strict=a_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
lowerCAmelCase_ = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
lowerCAmelCase_ = VideoMAEImageProcessor(size=a_ )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase_ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase_ = XCLIPProcessor(image_processor=a_ , tokenizer=a_ )
lowerCAmelCase_ = prepare_video(a_ )
lowerCAmelCase_ = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=a_ , return_tensors='pt' , padding=a_ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
lowerCAmelCase_ = model(**a_ )
# Verify outputs
lowerCAmelCase_ = outputs.logits_per_video
lowerCAmelCase_ = logits_per_video.softmax(dim=1 )
print('Probs:' , a_ )
# kinetics-400
if model_name == "xclip-base-patch32":
lowerCAmelCase_ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
lowerCAmelCase_ = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
lowerCAmelCase_ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
lowerCAmelCase_ = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
lowerCAmelCase_ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowerCAmelCase_ = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowerCAmelCase_ = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowerCAmelCase_ = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowerCAmelCase_ = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowerCAmelCase_ = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowerCAmelCase_ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowerCAmelCase_ = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowerCAmelCase_ = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(a_ , a_ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a_ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(a_ , organization='nielsr' )
processor.push_to_hub(a_ , organization='nielsr' )
slow_tokenizer.push_to_hub(a_ , organization='nielsr' )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
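# Example invocation (editor's addition; the script name and folder path are
# placeholders):
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 --pytorch_dump_folder_path ./xclip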
| 14 | 0 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( a_ ):
'''simple docstring'''
__a: Tuple = ['''image_processor''', '''tokenizer''']
__a: Union[str, Any] = '''AutoImageProcessor'''
__a: str = '''AutoTokenizer'''
def __init__( self , lowercase_=None , lowercase_=None , **lowercase_ ) -> int:
'''simple docstring'''
lowerCAmelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowercase_ , )
lowerCAmelCase_ = kwargs.pop('feature_extractor' )
lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowercase_ , lowercase_ )
lowerCAmelCase_ = self.image_processor
lowerCAmelCase_ = False
def __call__( self , *lowercase_ , **lowercase_ ) -> List[Any]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowercase_ , **lowercase_ )
lowerCAmelCase_ = kwargs.pop('images' , lowercase_ )
lowerCAmelCase_ = kwargs.pop('text' , lowercase_ )
if len(lowercase_ ) > 0:
lowerCAmelCase_ = args[0]
lowerCAmelCase_ = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
lowerCAmelCase_ = self.image_processor(lowercase_ , *lowercase_ , **lowercase_ )
if text is not None:
lowerCAmelCase_ = self.tokenizer(lowercase_ , **lowercase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase_ = encodings['input_ids']
return inputs
def _lowercase ( self , *lowercase_ , **lowercase_ ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def _lowercase ( self , *lowercase_ , **lowercase_ ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@contextmanager
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
lowerCAmelCase_ = True
lowerCAmelCase_ = self.tokenizer
yield
lowerCAmelCase_ = self.image_processor
lowerCAmelCase_ = False
def _lowercase ( self , lowercase_ , lowercase_=False , lowercase_=None ) -> List[str]:
'''simple docstring'''
if added_vocab is None:
lowerCAmelCase_ = self.tokenizer.get_added_vocab()
lowerCAmelCase_ = {}
while tokens:
lowerCAmelCase_ = re.search(R'<s_(.*?)>' , lowercase_ , re.IGNORECASE )
if start_token is None:
break
lowerCAmelCase_ = start_token.group(1 )
lowerCAmelCase_ = re.search(Rf'''</s_{key}>''' , lowercase_ , re.IGNORECASE )
lowerCAmelCase_ = start_token.group()
if end_token is None:
lowerCAmelCase_ = tokens.replace(lowercase_ , '' )
else:
lowerCAmelCase_ = end_token.group()
lowerCAmelCase_ = re.escape(lowercase_ )
lowerCAmelCase_ = re.escape(lowercase_ )
lowerCAmelCase_ = re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''' , lowercase_ , re.IGNORECASE )
if content is not None:
lowerCAmelCase_ = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowerCAmelCase_ = self.tokenajson(lowercase_ , is_inner_value=lowercase_ , added_vocab=lowercase_ )
if value:
if len(lowercase_ ) == 1:
lowerCAmelCase_ = value[0]
lowerCAmelCase_ = value
else: # leaf nodes
lowerCAmelCase_ = []
for leaf in content.split(R'<sep/>' ):
lowerCAmelCase_ = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowerCAmelCase_ = leaf[1:-2] # for categorical special tokens
output[key].append(lowercase_ )
if len(output[key] ) == 1:
lowerCAmelCase_ = output[key][0]
lowerCAmelCase_ = tokens[tokens.find(lowercase_ ) + len(lowercase_ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=lowercase_ , added_vocab=lowercase_ )
if len(lowercase_ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def _lowercase ( self ) -> int:
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase_ , )
return self.image_processor_class
@property
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase_ , )
return self.image_processor
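# Standalone sketch of the tag parsing used in the token-to-JSON method above
# (editor's addition): the opening tag of '<s_name>donut</s_name>' yields the
# key 'name'.
assert re.search(r'<s_(.*?)>', '<s_name>donut</s_name>', re.IGNORECASE).group(1) == 'name'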
| 371 |
def lowerCamelCase ( a_ , a_ ) -> List[Any]:
lowerCAmelCase_ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowerCamelCase ( a_ , a_ , a_ ) -> Union[str, Any]:
lowerCAmelCase_ = 0
while b > 0:
if b & 1:
lowerCAmelCase_ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
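# Illustrative values (editor's addition). Note that the second definition
# shadows the first, so only the modular doubling-and-adding variant remains
# visible; for a = 13, b = 11, c = 100 it yields (13 * 11) % 100 == 43.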
| 14 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
lowerCamelCase_ = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
lowerCamelCase_ = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
lowerCamelCase_ = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
lowerCamelCase_ = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
lowerCamelCase_ = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def lowerCamelCase ( a_ , a_ ) -> int:
for tf_name, hf_name in patterns:
lowerCAmelCase_ = k.replace(a_ , a_ )
return k
def lowerCamelCase ( a_ , a_ ) -> BigBirdPegasusForConditionalGeneration:
lowerCAmelCase_ = BigBirdPegasusConfig(**a_ )
lowerCAmelCase_ = BigBirdPegasusForConditionalGeneration(a_ )
lowerCAmelCase_ = torch_model.state_dict()
lowerCAmelCase_ = {}
# separating decoder weights
lowerCAmelCase_ = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
lowerCAmelCase_ = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
        lowerCAmelCase_ = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(a_ ):
continue
lowerCAmelCase_ = DECODER_PATTERNS
lowerCAmelCase_ = rename_state_dict_key(a_ , a_ )
if new_k not in state_dict:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ['dense', 'query', 'key', 'value'] ):
lowerCAmelCase_ = v.T
lowerCAmelCase_ = torch.from_numpy(a_ )
assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
        lowerCAmelCase_ = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(a_ ):
continue
lowerCAmelCase_ = REMAINING_PATTERNS
lowerCAmelCase_ = rename_state_dict_key(a_ , a_ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
        if any(i in k for i in ['dense', 'query', 'key', 'value'] ):
lowerCAmelCase_ = v.T
lowerCAmelCase_ = torch.from_numpy(a_ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'''
lowerCAmelCase_ = mapping['model.embed_positions.weight']
lowerCAmelCase_ = mapping.pop('model.embed_positions.weight' )
lowerCAmelCase_ , lowerCAmelCase_ = torch_model.load_state_dict(a_ , strict=a_ )
lowerCAmelCase_ = [
k
for k in missing
if k
not in [
'final_logits_bias',
'model.encoder.embed_tokens.weight',
'model.decoder.embed_tokens.weight',
'lm_head.weight',
]
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def lowerCamelCase ( a_ ) -> Dict:
lowerCAmelCase_ = tf.train.list_variables(a_ )
lowerCAmelCase_ = {}
lowerCAmelCase_ = ['global_step']
for name, shape in tqdm(a_ , desc='converting tf checkpoint to dict' ):
lowerCAmelCase_ = any(pat in name for pat in ignore_name )
if skip_key:
continue
lowerCAmelCase_ = tf.train.load_variable(a_ , a_ )
lowerCAmelCase_ = array
return tf_weights
def lowerCamelCase ( a_ , a_ , a_ ) -> Dict:
lowerCAmelCase_ = get_tf_weights_as_numpy(a_ )
lowerCAmelCase_ = convert_bigbird_pegasus(a_ , a_ )
torch_model.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
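# Example invocation (editor's addition; the script name and paths are
# placeholders):
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path ./bigbird-pegasus/model.ckpt --save_dir ./bigbird-pegasus-hf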
| 350 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class a_ ( a_ ):
'''simple docstring'''
__a: str = ['''vqvae''']
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ , mel=lowercase_ , vqvae=lowercase_ )
def _lowercase ( self ) -> int:
'''simple docstring'''
return 5_0 if isinstance(self.scheduler , lowercase_ ) else 1_0_0_0
@torch.no_grad()
def __call__( self , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
lowerCAmelCase_ = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowercase_ )
lowerCAmelCase_ = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowerCAmelCase_ = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowerCAmelCase_ = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=lowercase_ , device=self.device , )
lowerCAmelCase_ = noise
lowerCAmelCase_ = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowercase_ , lowercase_ )
lowerCAmelCase_ = self.mel.audio_slice_to_image(lowercase_ )
lowerCAmelCase_ = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
lowerCAmelCase_ = (input_image / 2_5_5) * 2 - 1
lowerCAmelCase_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowerCAmelCase_ = self.vqvae.encode(torch.unsqueeze(lowercase_ , 0 ) ).latent_dist.sample(
generator=lowercase_ )[0]
lowerCAmelCase_ = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , self.scheduler.timesteps[start_step - 1] )
lowerCAmelCase_ = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowerCAmelCase_ = int(mask_start_secs * pixels_per_second )
lowerCAmelCase_ = int(mask_end_secs * pixels_per_second )
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , lowercase_ ):
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ , lowercase_ )['sample']
else:
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample']
if isinstance(self.scheduler , lowercase_ ):
lowerCAmelCase_ = self.scheduler.step(
model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , eta=lowercase_ , generator=lowercase_ , )['prev_sample']
else:
lowerCAmelCase_ = self.scheduler.step(
model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , generator=lowercase_ , )['prev_sample']
if mask is not None:
if mask_start > 0:
lowerCAmelCase_ = mask[:, step, :, :mask_start]
if mask_end > 0:
lowerCAmelCase_ = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used during training to ensure unit variance
lowerCAmelCase_ = 1 / self.vqvae.config.scaling_factor * images
lowerCAmelCase_ = self.vqvae.decode(lowercase_ )['sample']
lowerCAmelCase_ = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowerCAmelCase_ = (images * 2_5_5).round().astype('uint8' )
lowerCAmelCase_ = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(lowercase_ , mode='RGB' ).convert('L' ) for _ in images) )
lowerCAmelCase_ = [self.mel.image_to_audio(lowercase_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowercase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase_ ) )
@torch.no_grad()
def _lowercase ( self , lowercase_ , lowercase_ = 5_0 ) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , lowercase_ )
self.scheduler.set_timesteps(lowercase_ )
lowerCAmelCase_ = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
lowerCAmelCase_ = (sample / 2_5_5) * 2 - 1
lowerCAmelCase_ = torch.Tensor(lowercase_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
lowerCAmelCase_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowerCAmelCase_ = self.scheduler.alphas_cumprod[t]
lowerCAmelCase_ = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowerCAmelCase_ = 1 - alpha_prod_t
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample']
lowerCAmelCase_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowerCAmelCase_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowerCAmelCase_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _lowercase ( lowercase_ , lowercase_ , lowercase_ ) -> torch.Tensor:
'''simple docstring'''
lowerCAmelCase_ = acos(torch.dot(torch.flatten(lowercase_ ) , torch.flatten(lowercase_ ) ) / torch.norm(lowercase_ ) / torch.norm(lowercase_ ) )
return sin((1 - alpha) * theta ) * xa / sin(lowercase_ ) + sin(alpha * theta ) * xa / sin(lowercase_ )
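# Note on the spherical interpolation helper above (editor's addition): for
# tensors x0, x1 and alpha in [0, 1], slerp(x0, x1, 0) returns x0 and
# slerp(x0, x1, 1) returns x1, with intermediate alphas blending along the
# great circle between them; it is used to interpolate diffusion noise while
# preserving its norm.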
| 14 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def lowerCamelCase ( a_ ) -> Optional[int]:
lowerCAmelCase_ = FileLock(str(tmpdir / 'foo.lock' ) )
lowerCAmelCase_ = FileLock(str(tmpdir / 'foo.lock' ) )
lowerCAmelCase_ = 0.01
with locka.acquire():
with pytest.raises(a_ ):
lowerCAmelCase_ = time.time()
locka.acquire(a_ )
assert time.time() - _start > timeout
def lowerCamelCase ( a_ ) -> Optional[int]:
lowerCAmelCase_ = 'a' * 1_000 + '.lock'
lowerCAmelCase_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('.lock' )
assert not locka._lock_file.endswith(a_ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
lowerCAmelCase_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(a_ ):
locka.acquire(0 )
| 351 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCamelCase_ = logging.get_logger(__name__)
def lowerCamelCase ( a_ , a_ , a_ , a_ ) -> Tuple[int, int]:
def constraint_to_multiple_of(a_ , a_ , a_=0 , a_=None ):
lowerCAmelCase_ = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
lowerCAmelCase_ = math.floor(val / multiple ) * multiple
if x < min_val:
lowerCAmelCase_ = math.ceil(val / multiple ) * multiple
return x
lowerCAmelCase_ = (output_size, output_size) if isinstance(a_ , a_ ) else output_size
lowerCAmelCase_ , lowerCAmelCase_ = get_image_size(a_ )
lowerCAmelCase_ , lowerCAmelCase_ = output_size
# determine new height and width
lowerCAmelCase_ = output_height / input_height
lowerCAmelCase_ = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
lowerCAmelCase_ = scale_width
else:
# fit height
lowerCAmelCase_ = scale_height
lowerCAmelCase_ = constraint_to_multiple_of(scale_height * input_height , multiple=a_ )
lowerCAmelCase_ = constraint_to_multiple_of(scale_width * input_width , multiple=a_ )
return (new_height, new_width)
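# Worked example for the helper above (editor's addition): an input of
# 480x640 with size (384, 384), keep_aspect_ratio=True and multiple=32 picks
# the scale closest to 1 (0.8, the height scale), so both sides shrink by 0.8
# and the result is (384, 512).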
class a_ ( a_ ):
'''simple docstring'''
__a: Union[str, Any] = ['''pixel_values''']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = False , lowercase_ = 1 , lowercase_ = True , lowercase_ = 1 / 2_5_5 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
lowerCAmelCase_ = get_size_dict(lowercase_ )
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = keep_aspect_ratio
lowerCAmelCase_ = ensure_multiple_of
lowerCAmelCase_ = resample
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = rescale_factor
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = False , lowercase_ = 1 , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowerCAmelCase_ = get_resize_output_image_size(
lowercase_ , output_size=(size['height'], size['width']) , keep_aspect_ratio=lowercase_ , multiple=lowercase_ , )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Dict:
'''simple docstring'''
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image:
'''simple docstring'''
lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ = size if size is not None else self.size
lowerCAmelCase_ = get_size_dict(lowercase_ )
lowerCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowerCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowerCAmelCase_ = resample if resample is not None else self.resample
lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ = image_std if image_std is not None else self.image_std
lowerCAmelCase_ = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase_ = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowerCAmelCase_ = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_rescale:
lowerCAmelCase_ = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
lowerCAmelCase_ = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
lowerCAmelCase_ = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowerCAmelCase_ = {'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(lowercase_ ):
lowerCAmelCase_ = target_sizes.numpy()
lowerCAmelCase_ = []
for idx in range(len(lowercase_ ) ):
lowerCAmelCase_ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_ )
lowerCAmelCase_ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase_ )
else:
lowerCAmelCase_ = logits.argmax(dim=1 )
lowerCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
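# Hypothetical usage sketch (names and shapes are assumptions, not from this file): given
# `outputs` from a matching segmentation model, the last method above (named
# post_process_semantic_segmentation in the un-obfuscated source) maps logits of shape
# (batch, num_labels, h, w) to one (height, width) label map per image, e.g.
#   maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])
#   maps[0].shape  # -> torch.Size([480, 640])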
| 14 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class a_ :
'''simple docstring'''
__a: int
__a: Node | None = None
__a: Node | None = None
def lowerCamelCase ( ) -> Node | None:
lowerCAmelCase_ = Node(1 )
lowerCAmelCase_ = Node(2 )
lowerCAmelCase_ = Node(3 )
lowerCAmelCase_ = Node(4 )
lowerCAmelCase_ = Node(5 )
return tree
def lowerCamelCase ( a_ ) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def lowerCamelCase ( a_ ) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def lowerCamelCase ( a_ ) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def lowerCamelCase ( a_ ) -> int:
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
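# Worked example (assumes the five nodes above are linked as in the original,
# un-obfuscated make_tree: 1 -> (2, 3), 2 -> (4, 5)):
#   preorder  -> [1, 2, 4, 5, 3]
#   inorder   -> [4, 2, 5, 1, 3]
#   postorder -> [4, 5, 2, 3, 1]
#   height    -> 3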
def lowerCamelCase ( a_ ) -> Sequence[Node | None]:
lowerCAmelCase_ = []
if root is None:
return output
lowerCAmelCase_ = deque([root] )
while process_queue:
lowerCAmelCase_ = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def lowerCamelCase ( a_ , a_ ) -> Sequence[Node | None]:
lowerCAmelCase_ = []
def populate_output(a_ , a_ ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(a_ , a_ )
return output
def lowerCamelCase ( a_ , a_ ) -> Sequence[Node | None]:
lowerCAmelCase_ = []
def populate_output(a_ , a_ ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(a_ , a_ )
return output
def lowerCamelCase ( a_ ) -> Sequence[Node | None] | list[Any]:
if root is None:
return []
lowerCAmelCase_ = []
lowerCAmelCase_ = 0
lowerCAmelCase_ = height(a_ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(a_ , a_ ) )
lowerCAmelCase_ = 1
else:
output.append(get_nodes_from_right_to_left(a_ , a_ ) )
lowerCAmelCase_ = 0
return output
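# For the same sample tree, zigzag alternates direction per level (illustrative,
# not in the original): [[1], [3, 2], [4, 5]].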
def lowerCamelCase ( ) -> None: # Main function for testing.
lowerCAmelCase_ = make_tree()
print(F'''In-order Traversal: {inorder(a_ )}''' )
print(F'''Pre-order Traversal: {preorder(a_ )}''' )
print(F'''Post-order Traversal: {postorder(a_ )}''' , '\n' )
print(F'''Height of Tree: {height(a_ )}''' , '\n' )
print('Complete Level Order Traversal: ' )
print(level_order(a_ ) , '\n' )
print('Level-wise order Traversal: ' )
for level in range(1 , height(a_ ) + 1 ):
print(F'''Level {level}:''' , get_nodes_from_left_to_right(a_ , level=a_ ) )
print('\nZigZag order Traversal: ' )
print(zigzag(a_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 352 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowerCamelCase_ = logging.get_logger(__name__)
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , *lowercase_ , **lowercase_ ) -> None:
'''simple docstring'''
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 14 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCamelCase_ = logging.get_logger(__name__)
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , *lowercase_ , **lowercase_ ) -> None:
'''simple docstring'''
warnings.warn(
'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use CLIPImageProcessor instead.' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 353 |
from __future__ import annotations
import queue
class a_ :
'''simple docstring'''
def __init__( self , lowercase_ ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = data
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def lowerCamelCase ( ) -> TreeNode:
print('\n********Press N to stop entering at any point of time********\n' )
lowerCAmelCase_ = input('Enter the value of the root node: ' ).strip().lower()
lowerCAmelCase_ = queue.Queue()
lowerCAmelCase_ = TreeNode(int(a_ ) )
q.put(a_ )
while not q.empty():
lowerCAmelCase_ = q.get()
lowerCAmelCase_ = F'''Enter the left node of {node_found.data}: '''
lowerCAmelCase_ = input(a_ ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCAmelCase_ = TreeNode(int(a_ ) )
lowerCAmelCase_ = left_node
q.put(a_ )
lowerCAmelCase_ = F'''Enter the right node of {node_found.data}: '''
lowerCAmelCase_ = input(a_ ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCAmelCase_ = TreeNode(int(a_ ) )
lowerCAmelCase_ = right_node
q.put(a_ )
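    # Unreachable in practice: each iteration pops one node and pushes two, so the
    # queue never empties and the loop can only exit via the "n" returns above.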
raise
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
lowerCAmelCase_ = queue.Queue()
q.put(a_ )
while not q.empty():
lowerCAmelCase_ = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
lowerCAmelCase_ = queue.Queue()
q.put(a_ )
while not q.empty():
lowerCAmelCase_ = []
while not q.empty():
lowerCAmelCase_ = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(a_ )
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
lowerCAmelCase_ = []
lowerCAmelCase_ = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(a_ )
lowerCAmelCase_ = n.left
# end of while means current node doesn't have left child
lowerCAmelCase_ = stack.pop()
# start to traverse its right child
lowerCAmelCase_ = n.right
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
lowerCAmelCase_ = []
lowerCAmelCase_ = node
while n or stack:
while n:
stack.append(a_ )
lowerCAmelCase_ = n.left
lowerCAmelCase_ = stack.pop()
print(n.data , end=',' )
lowerCAmelCase_ = n.right
def lowerCamelCase ( a_ ) -> None:
if not isinstance(a_ , a_ ) or not node:
return
lowerCAmelCase_ , lowerCAmelCase_ = [], []
lowerCAmelCase_ = node
stacka.append(a_ )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCAmelCase_ = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(a_ )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def lowerCamelCase ( a_ = "" , a_=50 , a_="*" ) -> str:
if not s:
return "\n" + width * char
lowerCAmelCase_ , lowerCAmelCase_ = divmod(width - len(a_ ) - 2 , 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
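# Worked example (illustrative): prompt("Binary Tree Traversals") pads the 22-character
# title to width 50 as 13 '*' + ' ' + title + ' ' + 13 '*', since divmod(50 - 22 - 2, 2)
# is (13, 0).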
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
lowerCamelCase_ = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 5_0 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 14 | 0 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase_ = logging.get_logger(__name__)
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , *lowercase_ , **lowercase_ ) -> None:
'''simple docstring'''
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 354 |
import baseaa
def lowerCamelCase ( a_ ) -> bytes:
    return baseaa.baaencode(a_.encode('utf-8' ) )
def lowerCamelCase ( a_ ) -> str:
return baseaa.baadecode(a_ ).decode('utf-8' )
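# Round-trip example (standard Base64 values, not from the original): "Hello World!"
# encodes to b'SGVsbG8gV29ybGQh' and decodes back to the same string.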
if __name__ == "__main__":
lowerCamelCase_ = """Hello World!"""
lowerCamelCase_ = baseaa_encode(test)
print(encoded)
lowerCamelCase_ = baseaa_decode(encoded)
print(decoded)
| 14 | 0 |
from __future__ import annotations
def lowerCamelCase ( a_ , a_ ) -> float:
lowerCAmelCase_ = sorted(numsa + numsa )
lowerCAmelCase_ , lowerCAmelCase_ = divmod(len(a_ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
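# Worked examples (not in the original): median_of_two_arrays([1, 3], [2]) -> 2
# (odd combined length picks the middle element); ([1, 2], [3, 4]) -> 2.5.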
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ = [float(x) for x in input("""Enter the elements of first array: """).split()]
lowerCamelCase_ = [float(x) for x in input("""Enter the elements of second array: """).split()]
print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 355 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def lowerCamelCase ( a_ , a_ , a_=None , a_=None ) -> int:
if attention_mask is None:
lowerCAmelCase_ = tf.cast(tf.math.not_equal(a_ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class a_ :
'''simple docstring'''
__a: Tuple = OPTConfig
__a: Optional[Any] = {}
__a: Tuple = '''gelu'''
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=9_9 , lowercase_=1_6 , lowercase_=2 , lowercase_=4 , lowercase_=4 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=2_0 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=1_6 , lowercase_=1_6 , ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = eos_token_id
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = bos_token_id
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = word_embed_proj_dim
lowerCAmelCase_ = False
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase_ , **self.config_updates , )
lowerCAmelCase_ = prepare_opt_inputs_dict(lowercase_ , lowercase_ )
return config, inputs_dict
def _lowercase ( self , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModel(config=lowercase_ )
lowerCAmelCase_ = inputs_dict['input_ids']
lowerCAmelCase_ = input_ids[:1, :]
lowerCAmelCase_ = inputs_dict['attention_mask'][:1, :]
lowerCAmelCase_ = 1
# first forward pass
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCAmelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )[0]
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
@require_tf
class a_ ( a_ , a_ , unittest.TestCase ):
'''simple docstring'''
__a: Optional[int] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__a: Optional[Any] = (TFOPTForCausalLM,) if is_tf_available() else ()
__a: Union[str, Any] = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
__a: int = False
__a: List[Any] = False
__a: Dict = False
__a: List[Any] = 1_0
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=lowercase_ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowercase_ , lowercase_ ):
if hasattr(lowercase_ , 'weight' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowercase_ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
lowerCAmelCase_ = model_class(config=lowercase_ )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase_ )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCAmelCase_ = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowercase_ )
# check that weights remain the same after resizing
lowerCAmelCase_ = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase_ = False
self.assertTrue(lowercase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowercase_ )
lowerCAmelCase_ = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase_ = False
self.assertTrue(lowercase_ )
def lowerCamelCase ( a_ ) -> Any:
return tf.constant(a_ , dtype=tf.intaa )
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
__a: Optional[int] = 9_9
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = tf.ones((4, 1) , dtype=tf.intaa ) * 2
lowerCAmelCase_ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowerCAmelCase_ = input_ids.shape[0]
lowerCAmelCase_ = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModel.from_pretrained('facebook/opt-350m' )
lowerCAmelCase_ = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowerCAmelCase_ = tf.not_equal(lowercase_ , model.config.pad_token_id )
with tf.GradientTape():
lowerCAmelCase_ = model(input_ids=lowercase_ , attention_mask=lowercase_ ).last_hidden_state
lowerCAmelCase_ = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , lowercase_ )
lowerCAmelCase_ = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-3 ) )
lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ )
lowerCAmelCase_ = xla_generate(lowercase_ , lowercase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-2 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
lowerCAmelCase_ = 'facebook/opt-350m'
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(self.path_model )
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ , add_special_tokens=lowercase_ )
lowerCAmelCase_ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowerCAmelCase_ = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ )
lowerCAmelCase_ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-125m'
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCAmelCase_ = []
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-350m'
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
lowerCAmelCase_ = 'left'
# use different length sentences to test batching
lowerCAmelCase_ = [
'Hello, my dog is a little',
'Today, I',
]
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ )
lowerCAmelCase_ = inputs['input_ids']
lowerCAmelCase_ = model.generate(input_ids=lowercase_ , attention_mask=inputs['attention_mask'] )
lowerCAmelCase_ = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(input_ids=lowercase_ )
lowerCAmelCase_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
lowerCAmelCase_ = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )
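    # Explanatory note (not in the original test): decoder-only models must be padded on
    # the left for batched generation, since new tokens are appended after the last real
    # position; right padding would make the model continue from pad tokens.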
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-350m'
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCAmelCase_ = []
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
| 14 | 0 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCamelCase_ = """pytorch_model.bin"""
lowerCamelCase_ = """pytorch_model.bin.index.json"""
lowerCamelCase_ = """adapter_config.json"""
lowerCamelCase_ = """adapter_model.bin"""
lowerCamelCase_ = """adapter_model.safetensors"""
lowerCamelCase_ = """tf_model.h5"""
lowerCamelCase_ = """tf_model.h5.index.json"""
lowerCamelCase_ = """model.ckpt"""
lowerCamelCase_ = """flax_model.msgpack"""
lowerCamelCase_ = """flax_model.msgpack.index.json"""
lowerCamelCase_ = """model.safetensors"""
lowerCamelCase_ = """model.safetensors.index.json"""
lowerCamelCase_ = """config.json"""
lowerCamelCase_ = """preprocessor_config.json"""
lowerCamelCase_ = FEATURE_EXTRACTOR_NAME
lowerCamelCase_ = """generation_config.json"""
lowerCamelCase_ = """modelcard.json"""
lowerCamelCase_ = """▁"""
lowerCamelCase_ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCamelCase_ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCamelCase_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCamelCase_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def lowerCamelCase ( a_ ) -> Dict:
if version.parse(a_ ) < version.parse(a_ ):
if "dev" in min_version:
lowerCAmelCase_ = (
'This example requires a source install from HuggingFace Transformers (see '
'`https://huggingface.co/docs/transformers/installation#install-from-source`),'
)
else:
lowerCAmelCase_ = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
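# Typical usage at the top of an example script (illustrative; this helper is named
# check_min_version in the un-obfuscated source, and the version string is hypothetical):
#   check_min_version("4.21.0.dev0")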
| 356 |
lowerCamelCase_ = 6_5_5_2_1
def lowerCamelCase ( a_ ) -> int:
lowerCAmelCase_ = 1
lowerCAmelCase_ = 0
for plain_chr in plain_text:
lowerCAmelCase_ = (a + ord(a_ )) % MOD_ADLER
lowerCAmelCase_ = (b + a) % MOD_ADLER
return (b << 16) | a
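# Sanity check (well-known reference value, not part of the original snippet): the
# Adler-32 checksum of "Wikipedia" is 0x11E60398, i.e. lowerCamelCase('Wikipedia')
# should return 300286872.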
| 14 | 0 |
from collections import namedtuple
lowerCamelCase_ = namedtuple("""from_to""", """from_ to""")
lowerCamelCase_ = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.001, 1_0_0_0),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.00454, 264.172),
"""cubicyard""": from_to(0.76455, 1.30795),
"""cubicfoot""": from_to(0.028, 35.3147),
"""cup""": from_to(0.000236588, 4226.75),
}
def lowerCamelCase ( a_ , a_ , a_ ):
if from_type not in METRIC_CONVERSION:
raise ValueError(
            F'''Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'''
+ ', '.join(a_ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ', '.join(a_ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
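# Worked example (illustrative): converting 4 "cubicmeter" to "cup" computes
# 4 * 1 (cubicmeter.from_) * 4226.75 (cup.to) = 16907.0.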
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def lowerCamelCase ( a_ , a_=False ) -> Tuple:
lowerCAmelCase_ = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('head' ):
lowerCAmelCase_ = 'segformer.encoder.' + key
if key.startswith('backbone' ):
lowerCAmelCase_ = key.replace('backbone' , 'segformer.encoder' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase_ = key[key.find('patch_embed' ) + len('patch_embed' )]
lowerCAmelCase_ = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(a_ )-1}''' )
if "norm" in key:
lowerCAmelCase_ = key.replace('norm' , 'layer_norm' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase_ = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
lowerCAmelCase_ = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(a_ )-1}''' )
if "layer_norm1" in key:
lowerCAmelCase_ = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
lowerCAmelCase_ = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase_ = key[key.find('block' ) + len('block' )]
lowerCAmelCase_ = key.replace(F'''block{idx}''' , F'''block.{int(a_ )-1}''' )
if "attn.q" in key:
lowerCAmelCase_ = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
lowerCAmelCase_ = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
lowerCAmelCase_ = key.replace('attn' , 'attention.self' )
if "fc1" in key:
lowerCAmelCase_ = key.replace('fc1' , 'dense1' )
if "fc2" in key:
lowerCAmelCase_ = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
lowerCAmelCase_ = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
lowerCAmelCase_ = key.replace('linear_fuse.conv' , 'linear_fuse' )
lowerCAmelCase_ = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase_ = key[key.find('linear_c' ) + len('linear_c' )]
lowerCAmelCase_ = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(a_ )-1}''' )
if key.startswith('head' ):
lowerCAmelCase_ = key.replace('head' , 'classifier' )
lowerCAmelCase_ = value
return new_state_dict
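# Example mapping (illustrative; in the un-obfuscated helper each replacement feeds the
# next, so for a typical mmseg checkpoint key):
#   'backbone.patch_embed1.proj.weight' -> 'segformer.encoder.patch_embeddings.0.proj.weight'
#   'backbone.block1.0.attn.q.weight'   -> 'segformer.encoder.block.0.0.attention.self.query.weight'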
def lowerCamelCase ( a_ , a_ ) -> Union[str, Any]:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
lowerCAmelCase_ = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase_ = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase_ = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase_ = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase_ = kv_bias[
config.hidden_sizes[i] :
]
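# Illustrative note (not in the original): the checkpoint stores keys and values fused in
# one 'kv' matrix of shape (2 * hidden_size, hidden_size); the first hidden_size rows feed
# the key projection and the remaining rows the value projection.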
def lowerCamelCase ( ) -> Optional[int]:
lowerCAmelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase_ = Image.open(requests.get(a_ , stream=a_ ).raw )
return image
@torch.no_grad()
def lowerCamelCase ( a_ , a_ , a_ ) -> int:
lowerCAmelCase_ = SegformerConfig()
lowerCAmelCase_ = False
# set attributes based on model_name
lowerCAmelCase_ = 'huggingface/label-files'
if "segformer" in model_name:
lowerCAmelCase_ = model_name[len('segformer.' ) : len('segformer.' ) + 2]
if "ade" in model_name:
lowerCAmelCase_ = 150
lowerCAmelCase_ = 'ade20k-id2label.json'
lowerCAmelCase_ = (1, 150, 128, 128)
elif "city" in model_name:
lowerCAmelCase_ = 19
lowerCAmelCase_ = 'cityscapes-id2label.json'
lowerCAmelCase_ = (1, 19, 128, 128)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
lowerCAmelCase_ = True
lowerCAmelCase_ = model_name[4:6]
lowerCAmelCase_ = 1_000
lowerCAmelCase_ = 'imagenet-1k-id2label.json'
lowerCAmelCase_ = (1, 1_000)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
lowerCAmelCase_ = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset' ) , 'r' ) )
lowerCAmelCase_ = {int(a_ ): v for k, v in idalabel.items()}
lowerCAmelCase_ = idalabel
lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 256
elif size == "b2":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 4, 6, 3]
elif size == "b3":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 4, 18, 3]
elif size == "b4":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 8, 27, 3]
elif size == "b5":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
lowerCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=a_ , align=a_ , do_random_crop=a_ )
# prepare image
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=a_ , return_tensors='pt' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )
else:
lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )['state_dict']
# rename keys
lowerCAmelCase_ = rename_keys(a_ , encoder_only=a_ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(a_ , a_ )
# create HuggingFace model and load state dict
if encoder_only:
lowerCAmelCase_ = False
lowerCAmelCase_ = SegformerForImageClassification(a_ )
else:
lowerCAmelCase_ = SegformerForSemanticSegmentation(a_ )
model.load_state_dict(a_ )
model.eval()
# forward pass
lowerCAmelCase_ = model(a_ )
lowerCAmelCase_ = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
else:
lowerCAmelCase_ = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowerCamelCase_ = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 14 | 0 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
lowerCamelCase_ = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
lowerCamelCase_ = typing.Union[np.floataa, int, float] # noqa: UP007
def lowerCamelCase ( a_ , a_ ) -> VectorOut:
return np.sqrt(np.sum((np.asarray(a_ ) - np.asarray(a_ )) ** 2 ) )
def lowerCamelCase ( a_ , a_ ) -> VectorOut:
return sum((va - va) ** 2 for va, va in zip(a_ , a_ ) ) ** (1 / 2)
if __name__ == "__main__":
def lowerCamelCase ( ) -> None:
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
benchmark()
| 358 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
"""shi-labs/nat-mini-in1k-224""": """https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json""",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class a_ ( a_ , a_ ):
'''simple docstring'''
__a: Optional[Any] = '''nat'''
__a: int = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , lowercase_=4 , lowercase_=3 , lowercase_=6_4 , lowercase_=[3, 4, 6, 5] , lowercase_=[2, 4, 8, 1_6] , lowercase_=7 , lowercase_=3.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=0.02 , lowercase_=1e-5 , lowercase_=0.0 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = depths
lowerCAmelCase_ = len(lowercase_ )
lowerCAmelCase_ = num_heads
lowerCAmelCase_ = kernel_size
lowerCAmelCase_ = mlp_ratio
lowerCAmelCase_ = qkv_bias
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = drop_path_rate
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase_ = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
lowerCAmelCase_ = layer_scale_init_value
lowerCAmelCase_ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(lowercase_ ) + 1 )]
lowerCAmelCase_ , lowerCAmelCase_ = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
| 14 | 0 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCamelCase_ = """sshleifer/bart-tiny-random"""
lowerCamelCase_ = """patrickvonplaten/t5-tiny-random"""
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
return AutoConfig.from_pretrained(lowercase_ )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ , *lowerCAmelCase_ = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ , *lowerCAmelCase_ = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=lowercase_ )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ , *lowerCAmelCase_ = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=lowercase_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ , *lowerCAmelCase_ = create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def _lowercase ( self ) -> int:
'''simple docstring'''
with self.assertRaises(lowercase_ ):
create_student_by_copying_alternating_layers(lowercase_ , tempfile.mkdtemp() , e=lowercase_ , d=lowercase_ )
| 359 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCamelCase_ = """pytorch_model.bin"""
lowerCamelCase_ = """pytorch_model.bin.index.json"""
lowerCamelCase_ = """adapter_config.json"""
lowerCamelCase_ = """adapter_model.bin"""
lowerCamelCase_ = """adapter_model.safetensors"""
lowerCamelCase_ = """tf_model.h5"""
lowerCamelCase_ = """tf_model.h5.index.json"""
lowerCamelCase_ = """model.ckpt"""
lowerCamelCase_ = """flax_model.msgpack"""
lowerCamelCase_ = """flax_model.msgpack.index.json"""
lowerCamelCase_ = """model.safetensors"""
lowerCamelCase_ = """model.safetensors.index.json"""
lowerCamelCase_ = """config.json"""
lowerCamelCase_ = """preprocessor_config.json"""
lowerCamelCase_ = FEATURE_EXTRACTOR_NAME
lowerCamelCase_ = """generation_config.json"""
lowerCamelCase_ = """modelcard.json"""
lowerCamelCase_ = """▁"""
lowerCamelCase_ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCamelCase_ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCamelCase_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCamelCase_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def lowerCamelCase ( a_ ) -> Dict:
if version.parse(a_ ) < version.parse(a_ ):
if "dev" in min_version:
lowerCAmelCase_ = (
'This example requires a source install from HuggingFace Transformers (see '
'`https://huggingface.co/docs/transformers/installation#install-from-source`),'
)
else:
lowerCAmelCase_ = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
| 14 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase_ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
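# Lazy-import note (added): because of the _LazyModule above, importing this package is
# cheap; the heavy torch/TF submodules load only when an attribute is first accessed, e.g.
#     from transformers.models.blip import BlipProcessor  # resolved via _import_structure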
| 360 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCamelCase ( a_ ) -> List[str]:
if isinstance(a_ , torch.Tensor ):
return image
elif isinstance(a_ , PIL.Image.Image ):
lowerCAmelCase_ = [image]
lowerCAmelCase_ = [trans(img.convert('RGB' ) ) for img in image]
lowerCAmelCase_ = torch.stack(a_ )
return image
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCAmelCase_ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
def _lowercase ( self , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
if strength < 0 or strength > 1:
            raise ValueError(f'''The value of strength should be in [0.0, 1.0] but is {strength}''' )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = min(int(num_inference_steps * strength ) , lowercase_ )
lowerCAmelCase_ = max(num_inference_steps - init_timestep , 0 )
lowerCAmelCase_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Tuple:
'''simple docstring'''
if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}''' )
lowerCAmelCase_ = image.to(device=lowercase_ , dtype=lowercase_ )
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCAmelCase_ = init_latents.shape
lowerCAmelCase_ = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
# get latents
print('add noise to latents at timestep' , lowercase_ )
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase_ = init_latents
return latents
@torch.no_grad()
def __call__( self , lowercase_ = None , lowercase_ = 0.8 , lowercase_ = 1 , lowercase_ = None , lowercase_ = 0.0 , lowercase_ = 5_0 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowercase_ )
# 2. Preprocess image
lowerCAmelCase_ = preprocess(lowercase_ )
# 3. set timesteps
self.scheduler.set_timesteps(lowercase_ , device=self.device )
lowerCAmelCase_ , lowerCAmelCase_ = self.get_timesteps(lowercase_ , lowercase_ , self.device )
lowerCAmelCase_ = timesteps[:1].repeat(lowercase_ )
# 4. Prepare latent variables
lowerCAmelCase_ = self.prepare_latents(lowercase_ , lowercase_ , lowercase_ , self.unet.dtype , self.device , lowercase_ )
lowerCAmelCase_ = latents
# 5. Denoising loop
for t in self.progress_bar(lowercase_ ):
# 1. predict noise model_output
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCAmelCase_ = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_ , ).prev_sample
lowerCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase_ = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowercase_ )
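# Hedged usage sketch (the pipeline class above is the mangled `a_`; the names below are
# assumptions, not definitions from this file):
#     pipe = ImageToImageDDIMPipeline(unet=unet, scheduler=ddim_scheduler)
#     out = pipe(image=PIL.Image.open('input.png'), strength=0.75, num_inference_steps=50)
#     out.images[0]  # the denoised PIL image when output_type == "pil"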
| 14 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class a_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_input_mask
lowerCAmelCase_ = use_token_type_ids
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = num_choices
lowerCAmelCase_ = scope
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ = None
if self.use_input_mask:
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ = None
if self.use_token_type_ids:
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = LlamaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )
lowerCAmelCase_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = True
lowerCAmelCase_ = LlamaModel(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
lowerCAmelCase_ = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = LlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> int:
'''simple docstring'''
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = LlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# first forward pass
lowerCAmelCase_ = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
lowerCAmelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase_ = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
lowerCAmelCase_ = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0]
# select random slice
lowerCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCAmelCase_ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a_ ( a_ , a_ , a_ , unittest.TestCase ):
'''simple docstring'''
__a: Optional[Any] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
__a: Union[str, Any] = (LlamaForCausalLM,) if is_torch_available() else ()
__a: str = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a: str = False
__a: int = False
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = LlamaModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7 )
def _lowercase ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> int:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ = type
self.model_tester.create_and_check_model(*lowercase_ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = 3
lowerCAmelCase_ = input_dict['input_ids']
lowerCAmelCase_ = input_ids.ne(1 ).to(lowercase_ )
lowerCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase_ = LlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = 3
lowerCAmelCase_ = 'single_label_classification'
lowerCAmelCase_ = input_dict['input_ids']
lowerCAmelCase_ = input_ids.ne(1 ).to(lowercase_ )
lowerCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase_ = LlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = 3
lowerCAmelCase_ = 'multi_label_classification'
lowerCAmelCase_ = input_dict['input_ids']
lowerCAmelCase_ = input_ids.ne(1 ).to(lowercase_ )
lowerCAmelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase_ = LlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def _lowercase ( self , lowercase_ ) -> int:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = ids_tensor([1, 1_0] , config.vocab_size )
lowerCAmelCase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase_ = LlamaModel(lowercase_ )
original_model.to(lowercase_ )
original_model.eval()
lowerCAmelCase_ = original_model(lowercase_ ).last_hidden_state
lowerCAmelCase_ = original_model(lowercase_ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase_ = {'type': scaling_type, 'factor': 10.0}
lowerCAmelCase_ = LlamaModel(lowercase_ )
scaled_model.to(lowercase_ )
scaled_model.eval()
lowerCAmelCase_ = scaled_model(lowercase_ ).last_hidden_state
lowerCAmelCase_ = scaled_model(lowercase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5 ) )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
lowerCAmelCase_ = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCAmelCase_ = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase_ = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , lowercase_ , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
lowerCAmelCase_ = model(torch.tensor(lowercase_ ) )
# Expected mean on dim = -1
lowerCAmelCase_ = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase_ = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , lowercase_ , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
lowerCAmelCase_ = model(torch.tensor(lowercase_ ) )
# Expected mean on dim = -1
lowerCAmelCase_ = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCAmelCase_ = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
        torch.testing.assert_close(out[0, 0, :3_0] , lowercase_ , atol=1e-5 , rtol=1e-5 )
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def _lowercase ( self ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCAmelCase_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
lowerCAmelCase_ = model(torch.tensor(lowercase_ ) )
lowerCAmelCase_ = torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , lowercase_ , atol=1e-2 , rtol=1e-2 )
# fmt: off
lowerCAmelCase_ = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , lowercase_ , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Model is currently gated' )
@slow
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
lowerCAmelCase_ = 'Simply put, the theory of relativity states that '
lowerCAmelCase_ = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
lowerCAmelCase_ = tokenizer.encode(lowercase_ , return_tensors='pt' )
lowerCAmelCase_ = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=lowercase_ )
# greedy generation outputs
lowerCAmelCase_ = model.generate(lowercase_ , max_new_tokens=6_4 , top_p=lowercase_ , temperature=1 , do_sample=lowercase_ )
lowerCAmelCase_ = tokenizer.decode(generated_ids[0] , skip_special_tokens=lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
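    # Running note (assumes the standard transformers test harness): the integration
    # tests above are decorated with @slow, so they execute only with RUN_SLOW=1, e.g.
    #     RUN_SLOW=1 pytest tests/models/llama -k "integration"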
| 361 |
def catalan_numbers ( upper_limit ) -> "list[int]":
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
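# Cross-check (added illustration, not in the original file): the DP table should match
# the closed form C(n) = binom(2n, n) // (n + 1).
def _verify_catalan(n: int = 10) -> bool:
    from math import comb
    return catalan_numbers(n) == [comb(2 * i, i) // (i + 1) for i in range(n + 1)]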
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
        N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 14 | 0 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCamelCase ( a_ ) -> List[str]:
if isinstance(a_ , torch.Tensor ):
return image
elif isinstance(a_ , PIL.Image.Image ):
lowerCAmelCase_ : Optional[Any] = [image]
lowerCAmelCase_ : Union[str, Any] = [trans(img.convert('RGB' ) ) for img in image]
lowerCAmelCase_ : Optional[Any] = torch.stack(a_ )
return image
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCAmelCase_ : int = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
def _lowercase ( self , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
if strength < 0 or strength > 1:
            raise ValueError(f'''The value of strength should be in [0.0, 1.0] but is {strength}''' )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : int = min(int(num_inference_steps * strength ) , lowercase_ )
lowerCAmelCase_ : str = max(num_inference_steps - init_timestep , 0 )
lowerCAmelCase_ : int = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Tuple:
'''simple docstring'''
if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}''' )
lowerCAmelCase_ : Any = image.to(device=lowercase_ , dtype=lowercase_ )
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCAmelCase_ : str = init_latents.shape
lowerCAmelCase_ : List[str] = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
# get latents
print('add noise to latents at timestep' , lowercase_ )
lowerCAmelCase_ : Optional[int] = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase_ : Union[str, Any] = init_latents
return latents
@torch.no_grad()
def __call__( self , lowercase_ = None , lowercase_ = 0.8 , lowercase_ = 1 , lowercase_ = None , lowercase_ = 0.0 , lowercase_ = 5_0 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowercase_ )
# 2. Preprocess image
lowerCAmelCase_ : Optional[int] = preprocess(lowercase_ )
# 3. set timesteps
self.scheduler.set_timesteps(lowercase_ , device=self.device )
lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.get_timesteps(lowercase_ , lowercase_ , self.device )
lowerCAmelCase_ : Union[str, Any] = timesteps[:1].repeat(lowercase_ )
# 4. Prepare latent variables
lowerCAmelCase_ : Tuple = self.prepare_latents(lowercase_ , lowercase_ , lowercase_ , self.unet.dtype , self.device , lowercase_ )
lowerCAmelCase_ : str = latents
# 5. Denoising loop
for t in self.progress_bar(lowercase_ ):
# 1. predict noise model_output
lowerCAmelCase_ : List[Any] = self.unet(lowercase_ , lowercase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCAmelCase_ : List[str] = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_ , ).prev_sample
lowerCAmelCase_ : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase_ : int = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowercase_ )
| 362 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(a_ )
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , *lowercase_ , **lowercase_ ) -> Any:
'''simple docstring'''
super().__init__(*lowercase_ , **lowercase_ )
self.check_model_type(lowercase_ )
def _lowercase ( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ = {}, {}
if padding is not None:
lowerCAmelCase_ = padding
if truncation is not None:
lowerCAmelCase_ = truncation
if top_k is not None:
lowerCAmelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowercase_ , lowercase_ = None , **lowercase_ ) -> int:
'''simple docstring'''
if isinstance(lowercase_ , (Image.Image, str) ) and isinstance(lowercase_ , lowercase_ ):
lowerCAmelCase_ = {'image': image, 'question': question}
else:
lowerCAmelCase_ = image
lowerCAmelCase_ = super().__call__(lowercase_ , **lowercase_ )
return results
def _lowercase ( self , lowercase_ , lowercase_=False , lowercase_=False ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = load_image(inputs['image'] )
lowerCAmelCase_ = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=lowercase_ , truncation=lowercase_ )
lowerCAmelCase_ = self.image_processor(images=lowercase_ , return_tensors=self.framework )
model_inputs.update(lowercase_ )
return model_inputs
def _lowercase ( self , lowercase_ ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = self.model(**lowercase_ )
return model_outputs
def _lowercase ( self , lowercase_ , lowercase_=5 ) -> Any:
'''simple docstring'''
if top_k > self.model.config.num_labels:
lowerCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
lowerCAmelCase_ = model_outputs.logits.sigmoid()[0]
lowerCAmelCase_ , lowerCAmelCase_ = probs.topk(lowercase_ )
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
lowerCAmelCase_ = scores.tolist()
lowerCAmelCase_ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
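# Hedged usage sketch (checkpoint id assumed to be the usual VQA pipeline default):
#     from transformers import pipeline
#     vqa = pipeline('visual-question-answering', model='dandelin/vilt-b32-finetuned-vqa')
#     vqa(image='cats.png', question='How many cats are there?', top_k=2)
#     # -> [{'score': 0.9..., 'answer': '2'}, ...]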
| 14 | 0 |
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCamelCase_ : Tuple = """src/transformers"""
# Matches is_xxx_available()
lowerCamelCase_ : str = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCamelCase_ : str = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase_ : Tuple = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCamelCase_ : int = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase_ : Optional[int] = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase_ : Tuple = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase_ : Tuple = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase_ : Optional[Any] = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCamelCase_ : int = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCamelCase_ : int = re.compile(r"""^\s*try:""")
# Catches a line with else:
lowerCamelCase_ : Union[str, Any] = re.compile(r"""^\s*else:""")
def lowerCamelCase ( a_ ) -> Tuple:
if _re_test_backend.search(a_ ) is None:
return None
lowerCAmelCase_ = [b[0] for b in _re_backend.findall(a_ )]
backends.sort()
return "_and_".join(a_ )
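# Example (added): for the line "if not is_torch_available() and not is_vision_available():"
# the helper above (referenced as `find_backend` later in this file) yields the backend
# string "torch_and_vision"; lines that don't match _re_test_backend yield None.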
def lowerCamelCase ( a_ ) -> Any:
with open(a_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowerCAmelCase_ = f.readlines()
lowerCAmelCase_ = 0
while line_index < len(a_ ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(a_ ):
return None
# First grab the objects without a specific backend in _import_structure
lowerCAmelCase_ = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
lowerCAmelCase_ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(a_ ):
lowerCAmelCase_ = _re_one_line_import_struct.search(a_ ).groups()[0]
            lowerCAmelCase_ = re.findall(r'\[([^\]]+)\]' , a_ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
lowerCAmelCase_ = _re_import_struct_key_value.search(a_ )
if single_line_import_search is not None:
lowerCAmelCase_ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(a_ ) > 0]
objects.extend(a_ )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
lowerCAmelCase_ = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowerCAmelCase_ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase_ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase_ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
lowerCAmelCase_ = lines[line_index]
if _re_import_struct_add_one.search(a_ ) is not None:
objects.append(_re_import_struct_add_one.search(a_ ).groups()[0] )
elif _re_import_struct_add_many.search(a_ ) is not None:
lowerCAmelCase_ = _re_import_struct_add_many.search(a_ ).groups()[0].split(', ' )
lowerCAmelCase_ = [obj[1:-1] for obj in imports if len(a_ ) > 0]
objects.extend(a_ )
elif _re_between_brackets.search(a_ ) is not None:
lowerCAmelCase_ = _re_between_brackets.search(a_ ).groups()[0].split(', ' )
lowerCAmelCase_ = [obj[1:-1] for obj in imports if len(a_ ) > 0]
objects.extend(a_ )
elif _re_quote_object.search(a_ ) is not None:
objects.append(_re_quote_object.search(a_ ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
lowerCAmelCase_ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowerCAmelCase_ = []
while (
line_index < len(a_ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
lowerCAmelCase_ = lines[line_index]
lowerCAmelCase_ = _re_import.search(a_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowerCAmelCase_ = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(a_ ):
# If the line is an if is_backend_available, we grab all objects associated.
lowerCAmelCase_ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowerCAmelCase_ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowerCAmelCase_ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
lowerCAmelCase_ = lines[line_index]
lowerCAmelCase_ = _re_import.search(a_ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowerCAmelCase_ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowerCamelCase ( a_ , a_ ) -> Any:
def find_duplicates(a_ ):
return [k for k, v in collections.Counter(a_ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase_ = []
for key in import_dict_objects.keys():
lowerCAmelCase_ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
lowerCAmelCase_ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCAmelCase_ = 'base imports' if key == 'none' else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def lowerCamelCase ( ) -> Union[str, Any]:
lowerCAmelCase_ = []
for root, _, files in os.walk(a_ ):
if "__init__.py" in files:
lowerCAmelCase_ = os.path.join(a_ , '__init__.py' )
lowerCAmelCase_ = parse_init(a_ )
if objects is not None:
lowerCAmelCase_ = analyze_results(*a_ )
if len(a_ ) > 0:
lowerCAmelCase_ = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('\n'.join(a_ ) )
if len(a_ ) > 0:
raise ValueError('\n\n'.join(a_ ) )
def lowerCamelCase ( ) -> List[str]:
lowerCAmelCase_ = []
for path, directories, files in os.walk(a_ ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(a_ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(a_ ) / folder).glob('*.py' ) ) ) == 0:
continue
lowerCAmelCase_ = str((Path(a_ ) / folder).relative_to(a_ ) )
lowerCAmelCase_ = short_path.replace(os.path.sep , '.' )
submodules.append(a_ )
for fname in files:
if fname == "__init__.py":
continue
lowerCAmelCase_ = str((Path(a_ ) / fname).relative_to(a_ ) )
lowerCAmelCase_ = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(a_ )
return submodules
lowerCamelCase_ : List[Any] = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def lowerCamelCase ( ) -> List[str]:
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase_ = importlib.util.spec_from_file_location(
'transformers' , os.path.join(a_ , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowerCAmelCase_ = spec.loader.load_module()
lowerCAmelCase_ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(a_ ) > 0:
lowerCAmelCase_ = '\n'.join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F'''{list_of_modules}\n'''
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
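    # Note (assumption about the surrounding repo): upstream, this check runs as part of
    # `make repo-consistency`, failing CI whenever an init's `_import_structure` and its
    # TYPE_CHECKING imports drift apart.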
| 363 |
def check_cycle ( graph ) -> bool:
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search ( graph , vertex , visited , rec_stk ) -> bool:
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
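    # Usage sketch (added example): a back edge 2 -> 0 creates a cycle.
    #     check_cycle({0: [1], 1: [2], 2: [0]})  # True
    #     check_cycle({0: [1], 1: [2], 2: []})   # False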
| 14 | 0 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase_ = 1_6
lowerCamelCase_ = 3_2
def lowerCamelCase ( a_ ) -> int:
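    # Convert a byte count to mebibytes (2**20 bytes); upstream accelerate calls this helper b2mb.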
return int(x / 2**20 )
class a_ :
'''simple docstring'''
def __enter__( self ) -> Optional[int]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowerCAmelCase_ = torch.cuda.memory_allocated()
return self
def __exit__( self , *lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
lowerCAmelCase_ = torch.cuda.memory_allocated()
lowerCAmelCase_ = torch.cuda.max_memory_allocated()
lowerCAmelCase_ = bamb(self.end - self.begin )
lowerCAmelCase_ = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def lowerCamelCase ( a_ , a_ = 16 , a_ = "bert-base-cased" , a_ = 320 , a_ = 160 , ) -> Optional[int]:
lowerCAmelCase_ = AutoTokenizer.from_pretrained(a_ )
lowerCAmelCase_ = load_dataset(
'glue' , 'mrpc' , split={'train': F'''train[:{n_train}]''', 'validation': F'''validation[:{n_val}]'''} )
def tokenize_function(a_ ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=a_ , max_length=a_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ = datasets.map(
a_ , batched=a_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=a_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(a_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(a_ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(a_ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowerCAmelCase_ = DataLoader(
tokenized_datasets['train'] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
lowerCAmelCase_ = DataLoader(
tokenized_datasets['validation'] , shuffle=a_ , collate_fn=a_ , batch_size=a_ )
return train_dataloader, eval_dataloader
def lowerCamelCase ( a_ , a_ ) -> Union[str, Any]:
# Initialize accelerator
lowerCAmelCase_ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ = config['lr']
lowerCAmelCase_ = int(config['num_epochs'] )
lowerCAmelCase_ = int(config['seed'] )
lowerCAmelCase_ = int(config['batch_size'] )
lowerCAmelCase_ = args.model_name_or_path
set_seed(a_ )
lowerCAmelCase_ , lowerCAmelCase_ = get_dataloaders(a_ , a_ , a_ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(a_ , return_dict=a_ )
# Instantiate optimizer
lowerCAmelCase_ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ = optimizer_cls(params=model.parameters() , lr=a_ )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowerCAmelCase_ = 1
lowerCAmelCase_ = (len(a_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ = get_linear_schedule_with_warmup(
optimizer=a_ , num_warmup_steps=0 , num_training_steps=a_ , )
else:
lowerCAmelCase_ = DummyScheduler(a_ , total_num_steps=a_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase_ = 0
# Now we train the model
lowerCAmelCase_ = {}
for epoch in range(a_ , a_ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(a_ ):
lowerCAmelCase_ = model(**a_ )
lowerCAmelCase_ = outputs.loss
lowerCAmelCase_ = loss / gradient_accumulation_steps
accelerator.backward(a_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train: {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowerCAmelCase_ = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F'''epoch-{epoch}'''] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(a_ , a_ )
def lowerCamelCase ( ) -> Any:
lowerCAmelCase_ = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=a_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=a_ , )
parser.add_argument(
'--output_dir' , type=a_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=a_ , default=a_ , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=a_ , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=a_ , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=a_ , default=1 , help='Number of train epochs.' , )
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(a_ , a_ )
if __name__ == "__main__":
main()
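# Launch sketch (added; the flags are exactly those defined by the argparser above,
# and `this_script.py` is a placeholder name):
#     accelerate launch this_script.py --model_name_or_path bert-base-cased \
#         --n_train 320 --n_val 160 --num_epochs 1 --peak_memory_upper_bound 2048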
| 364 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( a_ , a_ , a_ , unittest.TestCase ):
'''simple docstring'''
__a: int = StableDiffusionInpaintPipeline
__a: int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__a: Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__a: int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__a: List[str] = frozenset([] )
def _lowercase ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=lowercase_ , )
lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=lowercase_ )
torch.manual_seed(0 )
lowerCAmelCase_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
lowerCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
lowerCAmelCase_ = CLIPTextModel(lowercase_ )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _lowercase ( self , lowercase_ , lowercase_=0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ = Image.fromarray(np.uinta(lowercase_ ) ).convert('RGB' ).resize((6_4, 6_4) )
lowerCAmelCase_ = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((6_4, 6_4) )
if str(lowercase_ ).startswith('mps' ):
lowerCAmelCase_ = torch.manual_seed(lowercase_ )
else:
lowerCAmelCase_ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowerCAmelCase_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _lowercase ( self ) -> str:
'''simple docstring'''
lowerCAmelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = StableDiffusionInpaintPipeline(**lowercase_ )
lowerCAmelCase_ = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
lowerCAmelCase_ = self.get_dummy_inputs(lowercase_ )
lowerCAmelCase_ = sd_pipe(**lowercase_ ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCAmelCase_ = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self ) -> Any:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(lowercase_ , safety_checker=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , )
lowerCAmelCase_ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(
lowercase_ , torch_dtype=torch.floataa , safety_checker=lowercase_ , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , output_type='np' , )
lowerCAmelCase_ = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
lowerCAmelCase_ = 'stabilityai/stable-diffusion-2-inpainting'
lowerCAmelCase_ = PNDMScheduler.from_pretrained(lowercase_ , subfolder='scheduler' )
lowerCAmelCase_ = StableDiffusionInpaintPipeline.from_pretrained(
lowercase_ , safety_checker=lowercase_ , scheduler=lowercase_ , torch_dtype=torch.floataa , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(
prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , generator=lowercase_ , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
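These pipeline tests all rely on the same numpy slice-comparison pattern. A minimal, self-contained sketch of that pattern with made-up values (not the hard-coded expected slices above):
import numpy as np

rng = np.random.RandomState(0)
image = rng.rand(1, 64, 64, 3)                    # stand-in for sd_pipe(...).images
image_slice = image[0, -3:, -3:, -1]              # 3x3 corner of the last channel
expected_slice = image_slice.flatten().round(4)   # in a real test these are hard-coded reference values
assert image_slice.flatten().shape == (9,)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2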
| 14 | 0 |
"""simple docstring"""
def catalan_numbers ( upper_limit ) -> "list[int]":
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i - 1
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
            N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
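As a cross-check, the DP values can be compared against the closed form C(n) = binom(2n, n) / (n + 1); a quick sketch:
import math

def catalan_closed_form(n: int) -> int:
    return math.comb(2 * n, n) // (n + 1)

assert [catalan_closed_form(n) for n in range(6)] == [1, 1, 2, 5, 14, 42]
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]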
| 365 |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge :
    '''simple docstring'''
    destination_vertex: int
    weight: int
class AdjacencyList :
    '''simple docstring'''
    def __init__( self , size ) -> None:
        '''simple docstring'''
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size
    def __getitem__( self , vertex ) -> Iterator[Edge]:
        '''simple docstring'''
        return iter(self._graph[vertex] )
    @property
    def size ( self ) -> int:
        '''simple docstring'''
        return self._size
    def add_edge ( self , from_vertex , to_vertex , weight ) -> None:
        '''simple docstring'''
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).' )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )
    def get_shortest_path ( self , start_vertex , finish_vertex ) -> int | None:
        '''simple docstring'''
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.' )
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
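A small usage sketch of the 0-1 BFS above (the class and method names were recovered from the obfuscated dump, so treat them as assumptions): zero-weight edges are expanded from the front of the deque, so each vertex is settled at its final distance the first time it is popped.
graph = AdjacencyList(4)
graph.add_edge(0, 1, 0)   # free edge
graph.add_edge(1, 2, 1)
graph.add_edge(0, 2, 1)
graph.add_edge(2, 3, 0)
assert graph.get_shortest_path(0, 3) == 1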
| 14 | 0 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCamelCase_ = logging.get_logger(__name__)
def get_resize_output_image_size ( input_image , output_size , keep_aspect_ratio , multiple ) -> Tuple[int, int]:
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
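For example, resizing a 480x640 image to a target of 384 with keep_aspect_ratio and a multiple of 32 fits the height and rounds the width; a sketch, assuming a channels-first numpy array as input:
import numpy as np

dummy = np.zeros((3, 480, 640))  # (channels, height, width)
assert get_resize_output_image_size(dummy, 384, True, 32) == (384, 512)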
class a_ ( a_ ):
'''simple docstring'''
__a: Union[str, Any] = ['''pixel_values''']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = False , lowercase_ = 1 , lowercase_ = True , lowercase_ = 1 / 2_5_5 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
lowerCAmelCase_ = get_size_dict(lowercase_ )
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = keep_aspect_ratio
lowerCAmelCase_ = ensure_multiple_of
lowerCAmelCase_ = resample
lowerCAmelCase_ = do_rescale
lowerCAmelCase_ = rescale_factor
lowerCAmelCase_ = do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = False , lowercase_ = 1 , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowerCAmelCase_ = get_resize_output_image_size(
lowercase_ , output_size=(size['height'], size['width']) , keep_aspect_ratio=lowercase_ , multiple=lowercase_ , )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Dict:
'''simple docstring'''
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image:
'''simple docstring'''
lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ = size if size is not None else self.size
lowerCAmelCase_ = get_size_dict(lowercase_ )
lowerCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowerCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowerCAmelCase_ = resample if resample is not None else self.resample
lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ = image_std if image_std is not None else self.image_std
lowerCAmelCase_ = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase_ = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
lowerCAmelCase_ = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_rescale:
lowerCAmelCase_ = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
lowerCAmelCase_ = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
lowerCAmelCase_ = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowerCAmelCase_ = {'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ = None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(lowercase_ ):
lowerCAmelCase_ = target_sizes.numpy()
lowerCAmelCase_ = []
for idx in range(len(lowercase_ ) ):
lowerCAmelCase_ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=lowercase_ )
lowerCAmelCase_ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowercase_ )
else:
lowerCAmelCase_ = logits.argmax(dim=1 )
lowerCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
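The post-processing step above is just bilinear upsampling of the logits followed by an argmax over the class dimension; a standalone torch sketch with dummy shapes:
import torch

logits = torch.randn(1, 150, 24, 24)  # (batch, num_labels, height, width)
resized = torch.nn.functional.interpolate(
    logits, size=(480, 640), mode='bilinear', align_corners=False
)
segmentation_map = resized.argmax(dim=1)[0]  # (480, 640) tensor of label ids
assert segmentation_map.shape == (480, 640)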
| 366 |
from __future__ import annotations
RADIX = 1_0
def radix_sort ( list_of_ints ) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit place
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
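Usage sketch (the name radix_sort is recovered from context; the sort assumes non-negative integers):
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]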
| 14 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal ( ) -> None:
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes , edges )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result ) == sorted(expected )
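The kruskal helper imported above is not shown in this row; a minimal union-find sketch of what such an implementation typically looks like (names and the [u, v, weight] edge format are assumed from the test):
def kruskal_sketch(num_nodes: int, edges: list[list[int]]) -> list[list[int]]:
    parent = list(range(num_nodes))

    def find(x: int) -> int:
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    for u, v, weight in sorted(edges, key=lambda edge: edge[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:           # joining two components never creates a cycle
            parent[root_u] = root_v
            mst.append([u, v, weight])
    return mst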
| 367 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert ( base_model_path , checkpoint_path , LORA_PREFIX_UNET , LORA_PREFIX_TEXT_ENCODER , alpha ):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path , torch_dtype=torch.float32 )
    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path )
    visited = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            layer_infos = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' )
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' )
            curr_layer = pipeline.unet
        # find the target layer
        temp_name = layer_infos.pop(0 )
        while len(layer_infos ) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name )
                if len(layer_infos ) > 0:
                    temp_name = layer_infos.pop(0 )
                elif len(layer_infos ) == 0:
                    break
            except Exception:
                if len(temp_name ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    temp_name = layer_infos.pop(0 )
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace('lora_down' , 'lora_up' ) )
            pair_keys.append(key )
        else:
            pair_keys.append(key )
            pair_keys.append(key.replace('lora_up' , 'lora_down' ) )
        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down ).unsqueeze(2 ).unsqueeze(3 )
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32 )
            weight_down = state_dict[pair_keys[1]].to(torch.float32 )
            curr_layer.weight.data += alpha * torch.mm(weight_up , weight_down )
        # update visited list
        for item in pair_keys:
            visited.append(item )
    return pipeline
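The merge step above is just a rank-decomposed weight update, W = W0 + alpha * (up @ down); a toy tensor sketch:
import torch

d_out, d_in, rank, alpha = 8, 8, 4, 0.75
weight = torch.zeros(d_out, d_in)               # stands in for curr_layer.weight.data
lora_up = torch.randn(d_out, rank)              # "lora_up" weight
lora_down = torch.randn(rank, d_in)             # "lora_down" weight
weight += alpha * torch.mm(lora_up, lora_down)  # low-rank update, as in the loop above
assert weight.shape == (d_out, d_in)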
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = args.base_model_path
lowerCamelCase_ = args.checkpoint_path
lowerCamelCase_ = args.dump_path
lowerCamelCase_ = args.lora_prefix_unet
lowerCamelCase_ = args.lora_prefix_text_encoder
lowerCamelCase_ = args.alpha
lowerCamelCase_ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
lowerCamelCase_ = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 14 | 0 |
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode :
    '''simple docstring'''
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
lowerCamelCase_ = namedtuple("""CoinsDistribResult""", """moves excess""")
def distribute_coins ( root ) -> int:
    if root is None:
        return 0
    # Validation
    def count_nodes(node ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError('The number of nodes should be the same as the number of coins' )
    # Main calculation
    def get_distrib(node ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves , result_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
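Usage sketch for the snippet above: a root holding 3 coins with two empty children needs one move to each child, so 2 moves in total.
root = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(root) == 2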
| 368 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def lowerCamelCase ( a_ ) -> Any:
lowerCAmelCase_ = tmp_path / 'file.csv'
lowerCAmelCase_ = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20\n ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> List[Any]:
lowerCAmelCase_ = tmp_path / 'malformed_file.csv'
lowerCAmelCase_ = textwrap.dedent(
'\\n header1,header2\n 1,2\n 10,20,\n ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ , a_ ) -> List[str]:
lowerCAmelCase_ = tmp_path / 'csv_with_image.csv'
lowerCAmelCase_ = textwrap.dedent(
F'''\
image
{image_file}
''' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> int:
lowerCAmelCase_ = tmp_path / 'csv_with_label.csv'
lowerCAmelCase_ = textwrap.dedent(
'\\n label\n good\n bad\n good\n ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
@pytest.fixture
def lowerCamelCase ( a_ ) -> Union[str, Any]:
lowerCAmelCase_ = tmp_path / 'csv_with_int_list.csv'
lowerCAmelCase_ = textwrap.dedent(
'\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return str(a_ )
def lowerCamelCase ( a_ , a_ , a_ ) -> Optional[Any]:
lowerCAmelCase_ = Csv()
lowerCAmelCase_ = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(a_ , match='Error tokenizing data' ):
for _ in generator:
pass
assert any(
record.levelname == 'ERROR'
and 'Failed to read file' in record.message
and os.path.basename(a_ ) in record.message
for record in caplog.records )
@require_pil
def lowerCamelCase ( a_ ) -> Optional[Any]:
with open(a_ , encoding='utf-8' ) as f:
lowerCAmelCase_ = f.read().splitlines()[1]
lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_image]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('image' ).type == Image()()
lowerCAmelCase_ = pa_table.to_pydict()['image']
assert generated_content == [{"path": image_file, "bytes": None}]
def lowerCamelCase ( a_ ) -> int:
with open(a_ , encoding='utf-8' ) as f:
lowerCAmelCase_ = f.read().splitlines()[1:]
lowerCAmelCase_ = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_label]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
lowerCAmelCase_ = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad'] ).str2int(label ) for label in labels]
def lowerCamelCase ( a_ ) -> Union[str, Any]:
    lowerCAmelCase_ = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda x : [int(i ) for i in x.split()]} )
lowerCAmelCase_ = csv._generate_tables([[csv_file_with_int_list]] )
lowerCAmelCase_ = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
lowerCAmelCase_ = pa_table.to_pydict()['int_list']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
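The converters mechanism is pandas' own; the same parsing can be sketched directly with pandas.read_csv (assuming pandas is installed, as the Csv module requires):
import pandas as pd
from io import StringIO

df = pd.read_csv(
    StringIO('int_list\n1 2 3\n4 5 6\n7 8 9\n'),
    converters={'int_list': lambda x: [int(i) for i in x.split()]},
)
assert df['int_list'].tolist() == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]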
| 14 | 0 |
from __future__ import annotations
def decrypt_caesar_with_chi_squared ( ciphertext , cipher_alphabet = None , frequencies_dict = None , case_sensitive = False , ) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i ) for i in range(97 , 123 )]
    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            'a': 0.08_497,
            'b': 0.01_492,
            'c': 0.02_202,
            'd': 0.04_253,
            'e': 0.11_162,
            'f': 0.02_228,
            'g': 0.02_015,
            'h': 0.06_094,
            'i': 0.07_546,
            'j': 0.00_153,
            'k': 0.01_292,
            'l': 0.04_025,
            'm': 0.02_406,
            'n': 0.06_749,
            'o': 0.07_507,
            'p': 0.01_929,
            'q': 0.00_095,
            'r': 0.07_587,
            's': 0.06_327,
            't': 0.09_356,
            'u': 0.02_758,
            'v': 0.00_978,
            'w': 0.02_560,
            'x': 0.00_150,
            'y': 0.01_994,
            'z': 0.00_077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict
    if not case_sensitive:
        ciphertext = ciphertext.lower()
    # Chi squared statistic values
    chi_squared_statistic_values = {}
    # cycle through all of the shifts
    for shift in range(len(alphabet_letters ) ):
        decrypted_with_shift = ''
        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower() ) - shift) % len(
                    alphabet_letters )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter
        chi_squared_statistic = 0.0
        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter )
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key ) -> tuple[float, str]:
        return chi_squared_statistic_values[key]
    most_likely_cipher = min(
        chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) | 369 |
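Usage sketch (identifier names in this row, including decrypt_caesar_with_chi_squared, were recovered from the obfuscated dump): the shift whose letter distribution best matches English wins.
shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared('crybd cdbsxq')
assert (shift, decoded) == (10, 'short string')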
from maths.prime_factors import prime_factors
def liouville_lambda ( number ) -> int:
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        raise ValueError('Input must be a positive integer' )
    return -1 if len(prime_factors(number ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
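A quick sanity check of the completely multiplicative pattern λ(n) = (-1)**Ω(n), assuming the maths.prime_factors helper imported above is available:
assert [liouville_lambda(n) for n in range(1, 11)] == [1, -1, -1, 1, -1, 1, -1, -1, 1, 1]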
| 14 | 0 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase_ = logging.get_logger(__name__)
def normalize_box ( box , width , height ) -> list[int]:
    return [
        int(1_000 * (box[0] / width) ),
        int(1_000 * (box[1] / height) ),
        int(1_000 * (box[2] / width) ),
        int(1_000 * (box[3] / height) ),
    ]
def apply_tesseract ( image , lang , tesseract_config = None ) -> tuple[list, list]:
    tesseract_config = tesseract_config if tesseract_config is not None else ''
    # apply OCR
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='dict' , config=tesseract_config )
    words , left , top , width , height = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
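normalize_box simply rescales pixel coordinates into the 0-1000 grid LayoutLM-style models expect; for example:
assert normalize_box([15, 30, 120, 60], 400, 300) == [37, 100, 300, 200]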
class a_ ( a_ ):
'''simple docstring'''
__a: List[Any] = ['''pixel_values''']
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = None , lowercase_ = "" , **lowercase_ , ) -> None:
'''simple docstring'''
super().__init__(**lowercase_ )
lowerCAmelCase_ = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
lowerCAmelCase_ = get_size_dict(lowercase_ )
lowerCAmelCase_ = do_resize
lowerCAmelCase_ = size
lowerCAmelCase_ = resample
lowerCAmelCase_ = apply_ocr
lowerCAmelCase_ = ocr_lang
lowerCAmelCase_ = tesseract_config
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = None , **lowercase_ , ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowerCAmelCase_ = (size['height'], size['width'])
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image:
'''simple docstring'''
lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ = size if size is not None else self.size
lowerCAmelCase_ = get_size_dict(lowercase_ )
lowerCAmelCase_ = resample if resample is not None else self.resample
lowerCAmelCase_ = apply_ocr if apply_ocr is not None else self.apply_ocr
lowerCAmelCase_ = ocr_lang if ocr_lang is not None else self.ocr_lang
lowerCAmelCase_ = tesseract_config if tesseract_config is not None else self.tesseract_config
lowerCAmelCase_ = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase_ = [to_numpy_array(lowercase_ ) for image in images]
if apply_ocr:
requires_backends(self , 'pytesseract' )
lowerCAmelCase_ = []
lowerCAmelCase_ = []
for image in images:
lowerCAmelCase_ , lowerCAmelCase_ = apply_tesseract(lowercase_ , lowercase_ , lowercase_ )
words_batch.append(lowercase_ )
boxes_batch.append(lowercase_ )
if do_resize:
lowerCAmelCase_ = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
lowerCAmelCase_ = [flip_channel_order(lowercase_ ) for image in images]
lowerCAmelCase_ = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
lowerCAmelCase_ = BatchFeature(data={'pixel_values': images} , tensor_type=lowercase_ )
if apply_ocr:
lowerCAmelCase_ = words_batch
lowerCAmelCase_ = boxes_batch
return data
| 370 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCamelCase ( a_ , a_ ) -> Tuple:
lowerCAmelCase_ = XCLIPTextConfig()
# derive patch size from model name
lowerCAmelCase_ = model_name.find('patch' )
lowerCAmelCase_ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
lowerCAmelCase_ = XCLIPVisionConfig(patch_size=a_ , num_frames=a_ )
if "large" in model_name:
lowerCAmelCase_ = 768
lowerCAmelCase_ = 3_072
lowerCAmelCase_ = 12
lowerCAmelCase_ = 1_024
lowerCAmelCase_ = 4_096
lowerCAmelCase_ = 16
lowerCAmelCase_ = 24
lowerCAmelCase_ = 768
lowerCAmelCase_ = 3_072
if model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ = 336
lowerCAmelCase_ = XCLIPConfig.from_text_vision_configs(a_ , a_ )
if "large" in model_name:
lowerCAmelCase_ = 768
return config
def lowerCamelCase ( a_ ) -> List[str]:
# text encoder
if name == "token_embedding.weight":
lowerCAmelCase_ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
lowerCAmelCase_ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
lowerCAmelCase_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
lowerCAmelCase_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
lowerCAmelCase_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
lowerCAmelCase_ = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
lowerCAmelCase_ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
lowerCAmelCase_ = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
lowerCAmelCase_ = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
lowerCAmelCase_ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
lowerCAmelCase_ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
lowerCAmelCase_ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
lowerCAmelCase_ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
lowerCAmelCase_ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
lowerCAmelCase_ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
lowerCAmelCase_ = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
lowerCAmelCase_ = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
lowerCAmelCase_ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
lowerCAmelCase_ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
lowerCAmelCase_ = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
lowerCAmelCase_ = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
lowerCAmelCase_ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def lowerCamelCase ( a_ , a_ ) -> Dict:
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ = orig_state_dict.pop(a_ )
if "attn.in_proj" in key:
lowerCAmelCase_ = key.split('.' )
if key.startswith('visual' ):
lowerCAmelCase_ = key_split[3]
lowerCAmelCase_ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[
:dim
]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[
-dim:
]
else:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
elif key.startswith('mit' ):
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.vision_config.mit_hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[dim : dim * 2, :]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[dim : dim * 2]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = rename_key(a_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
lowerCAmelCase_ = val.T
lowerCAmelCase_ = val
return orig_state_dict
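The in_proj splitting above relies on q, k and v being concatenated along the first axis of attn.in_proj_weight; a toy sketch of that slicing:
import torch

dim = 4
in_proj_weight = torch.randn(3 * dim, dim)   # fused qkv projection
q_proj = in_proj_weight[:dim, :]
k_proj = in_proj_weight[dim : dim * 2, :]
v_proj = in_proj_weight[-dim:, :]
assert q_proj.shape == k_proj.shape == v_proj.shape == (dim, dim)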
def lowerCamelCase ( a_ ) -> List[str]:
if num_frames == 8:
lowerCAmelCase_ = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
lowerCAmelCase_ = 'eating_spaghetti.npy'
elif num_frames == 32:
lowerCAmelCase_ = 'eating_spaghetti_32_frames.npy'
lowerCAmelCase_ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=a_ , repo_type='dataset' , )
lowerCAmelCase_ = np.load(a_ )
return list(a_ )
def lowerCamelCase ( a_ , a_=None , a_=False ) -> List[Any]:
lowerCAmelCase_ = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
lowerCAmelCase_ = model_to_url[model_name]
lowerCAmelCase_ = 8
if "16-frames" in model_name:
lowerCAmelCase_ = 16
elif "shot" in model_name:
lowerCAmelCase_ = 32
lowerCAmelCase_ = get_xclip_config(a_ , a_ )
lowerCAmelCase_ = XCLIPModel(a_ )
model.eval()
if "drive" in checkpoint_url:
lowerCAmelCase_ = 'pytorch_model.bin'
gdown.cached_download(a_ , a_ , quiet=a_ )
lowerCAmelCase_ = torch.load(a_ , map_location='cpu' )['model']
else:
lowerCAmelCase_ = torch.hub.load_state_dict_from_url(a_ )['model']
lowerCAmelCase_ = convert_state_dict(a_ , a_ )
lowerCAmelCase_ = XCLIPModel(a_ )
lowerCAmelCase_ , lowerCAmelCase_ = model.load_state_dict(a_ , strict=a_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
lowerCAmelCase_ = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
lowerCAmelCase_ = VideoMAEImageProcessor(size=a_ )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase_ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase_ = XCLIPProcessor(image_processor=a_ , tokenizer=a_ )
lowerCAmelCase_ = prepare_video(a_ )
lowerCAmelCase_ = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=a_ , return_tensors='pt' , padding=a_ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
lowerCAmelCase_ = model(**a_ )
# Verify outputs
lowerCAmelCase_ = outputs.logits_per_video
lowerCAmelCase_ = logits_per_video.softmax(dim=1 )
print('Probs:' , a_ )
# kinetics-400
if model_name == "xclip-base-patch32":
lowerCAmelCase_ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
lowerCAmelCase_ = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
lowerCAmelCase_ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
lowerCAmelCase_ = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
lowerCAmelCase_ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowerCAmelCase_ = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowerCAmelCase_ = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowerCAmelCase_ = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowerCAmelCase_ = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowerCAmelCase_ = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowerCAmelCase_ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowerCAmelCase_ = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowerCAmelCase_ = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(a_ , a_ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a_ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(a_ , organization='nielsr' )
processor.push_to_hub(a_ , organization='nielsr' )
slow_tokenizer.push_to_hub(a_ , organization='nielsr' )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 14 | 0 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , *lowercase_ , lowercase_=None , lowercase_=None , **lowercase_ ) -> int:
'''simple docstring'''
super().__init__(*lowercase_ , **lowercase_ )
lowerCAmelCase_ = eval_examples
lowerCAmelCase_ = post_process_function
def _lowercase ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]:
'''simple docstring'''
lowerCAmelCase_ = gen_kwargs.copy()
lowerCAmelCase_ = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length
)
lowerCAmelCase_ = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams
)
lowerCAmelCase_ = gen_kwargs
lowerCAmelCase_ = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCAmelCase_ = self.get_eval_dataloader(lowercase_ )
lowerCAmelCase_ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase_ = self.compute_metrics
lowerCAmelCase_ = None
lowerCAmelCase_ = time.time()
lowerCAmelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase_ = eval_loop(
lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
lowerCAmelCase_ = compute_metrics
lowerCAmelCase_ = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowerCAmelCase_ = self.post_process_function(lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase_ = self.compute_metrics(lowercase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
lowerCAmelCase_ = metrics.pop(lowercase_ )
metrics.update(output.metrics )
else:
lowerCAmelCase_ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCAmelCase_ = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_ )
return metrics
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_ ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = gen_kwargs.copy()
lowerCAmelCase_ = self.get_test_dataloader(lowercase_ )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase_ = self.compute_metrics
lowerCAmelCase_ = None
lowerCAmelCase_ = time.time()
lowerCAmelCase_ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase_ = eval_loop(
lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , )
finally:
lowerCAmelCase_ = compute_metrics
lowerCAmelCase_ = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCAmelCase_ = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict' )
lowerCAmelCase_ = self.compute_metrics(lowercase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
lowerCAmelCase_ = metrics.pop(lowercase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_ )
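Both methods report throughput via speed_metrics; the essence of that bookkeeping, sketched with stand-in numbers:
import math
import time

start_time = time.time()
time.sleep(0.05)                               # stand-in for the evaluation loop
num_samples, batch_size = 1_000, 32
num_steps = math.ceil(num_samples / batch_size)
runtime = time.time() - start_time
metrics = {
    'eval_runtime': round(runtime, 4),
    'eval_samples_per_second': round(num_samples / runtime, 3),
    'eval_steps_per_second': round(num_steps / runtime, 3),
}
print(metrics)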
| 371 |
def binary_multiply ( a , b ) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply ( a , b , c ) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
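Usage sketch (the descriptive function names are assumptions; the originals were obfuscated). The second variant keeps every partial sum reduced mod c, which matters in fixed-width languages even though Python ints never overflow:
assert binary_multiply(6, 7) == 42
assert binary_mod_multiply(6, 7, 5) == 42 % 5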
| 14 | 0 |