code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
from statistics import mean
import numpy as np
def __lowercase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ):
a__ = 0
# Number of processes finished
a__ = 0
# Displays the finished process.
# If it is 0, the performance is completed if it is 1, before the performance.
a__ = [0] * no_of_process
# List to include calculation results
a__ = [0] * no_of_process
# Sort by arrival time.
a__ = [burst_time[i] for i in np.argsort(__lowerCAmelCase )]
a__ = [process_name[i] for i in np.argsort(__lowerCAmelCase )]
arrival_time.sort()
while no_of_process > finished_process_count:
a__ = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
a__ = arrival_time[i]
a__ = 0
# Index showing the location of the process being performed
a__ = 0
# Saves the current response ratio.
a__ = 0
for i in range(0 , __lowerCAmelCase ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
a__ = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
a__ = temp
a__ = i
# Calculate the turn around time
a__ = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
a__ = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def __lowercase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ):
a__ = [0] * no_of_process
for i in range(0 , __lowerCAmelCase ):
a__ = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
snake_case : List[Any] = 5
snake_case : List[str] = ['''A''', '''B''', '''C''', '''D''', '''E''']
snake_case : Dict = [1, 2, 3, 4, 5]
snake_case : List[Any] = [1, 2, 3, 4, 5]
snake_case : str = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
snake_case : Optional[int] = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
f"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
f"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(f"""average waiting time : {mean(waiting_time):.5f}""")
print(f"""average turn around time : {mean(turn_around_time):.5f}""")
| 657 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
snake_case : Dict = '''
Human: <<task>>
Assistant: '''
snake_case : Optional[int] = '''huggingface-tools/default-prompts'''
snake_case : Tuple = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any="run" ):
if prompt_or_repo_id is None:
a__ = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , __lowerCAmelCase ) is not None:
return prompt_or_repo_id
a__ = cached_file(
__lowerCAmelCase , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(__lowerCAmelCase , 'r' , encoding='utf-8' ) as f:
return f.read()
| 657 | 1 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Any=1_0 ):
a__ = []
for _ in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=1_0 ):
a__ = []
for step in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = os.path.join(__lowerCAmelCase , 'schedule.bin' )
torch.save(scheduler.state_dict() , __lowerCAmelCase )
a__ = torch.load(__lowerCAmelCase )
scheduler.load_state_dict(__lowerCAmelCase )
return lrs
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :Union[str, Any] ) -> int:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> str:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = AdamW(params=[w] ,lr=2E-1 ,weight_decay=0.0 )
for _ in range(1_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
def lowerCamelCase__( self :Tuple ) -> int:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = Adafactor(
params=[w] ,lr=1E-2 ,eps=(1E-30, 1E-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,betaa=__snake_case ,weight_decay=0.0 ,relative_step=__snake_case ,scale_parameter=__snake_case ,warmup_init=__snake_case ,)
for _ in range(10_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
@require_torch
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : str = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
UpperCAmelCase__ : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
UpperCAmelCase__ : Optional[Any] = 1_0
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :Any=None ) -> Optional[Any]:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case ,msg=__snake_case )
def lowerCamelCase__( self :Tuple ) -> List[Any]:
a__ = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
a__ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
a__ , a__ = data
a__ = scheduler_func(self.optimizer ,**__snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 )
a__ = unwrap_schedule(__snake_case ,self.num_steps )
self.assertListAlmostEqual(
__snake_case ,__snake_case ,tol=1E-2 ,msg=F'failed for {scheduler_func} in normal scheduler' ,)
a__ = scheduler_func(self.optimizer ,**__snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__snake_case ) # wrap to test picklability of the schedule
a__ = unwrap_and_save_reload_schedule(__snake_case ,self.num_steps )
self.assertListEqual(__snake_case ,__snake_case ,msg=F'failed for {scheduler_func} in save and reload' )
class snake_case_ :
def __init__( self :Tuple ,__snake_case :str ) -> Any:
a__ = fn
def __call__( self :List[str] ,*__snake_case :Optional[Any] ,**__snake_case :Optional[int] ) -> Union[str, Any]:
return self.fn(*__snake_case ,**__snake_case )
@classmethod
def lowerCamelCase__( self :Tuple ,__snake_case :Union[str, Any] ) -> Dict:
a__ = list(map(self ,scheduler.lr_lambdas ) )
| 657 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
a__ = precision
a__ = ceil(precision / 1_4 )
a__ = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
a__ = 1
a__ = 1_3_5_9_1_4_0_9
a__ = Decimal(__lowerCAmelCase )
for k in range(1 , __lowerCAmelCase ):
a__ = factorial(6 * k ) // (factorial(3 * k ) * factorial(__lowerCAmelCase ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
snake_case : Tuple = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 657 | 1 |
from math import sqrt
def __lowercase ( __lowerCAmelCase : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_1 ):
a__ = 0
a__ = 1
while count != nth and number < 3:
number += 1
if is_prime(__lowerCAmelCase ):
count += 1
while count != nth:
number += 2
if is_prime(__lowerCAmelCase ):
count += 1
return number
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 |
def __lowercase ( __lowerCAmelCase : int = 2_0_0 ):
a__ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
a__ = [0] * (pence + 1)
a__ = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(__lowerCAmelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
| 657 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case : int = logging.get_logger(__name__)
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any]=False ):
a__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'deit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'deit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'deit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'deit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'deit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'deit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'deit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'deit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'deit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'deit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
a__ = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[str]=False ):
for i in range(config.num_hidden_layers ):
if base_model:
a__ = ''
else:
a__ = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a__ = state_dict.pop(F'blocks.{i}.attn.qkv.weight' )
a__ = state_dict.pop(F'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
a__ = in_proj_weight[
: config.hidden_size, :
]
a__ = in_proj_bias[: config.hidden_size]
a__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a__ = in_proj_weight[
-config.hidden_size :, :
]
a__ = in_proj_bias[-config.hidden_size :]
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : List[str] ):
a__ = dct.pop(__lowerCAmelCase )
a__ = val
def __lowercase ( ):
a__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ):
a__ = DeiTConfig()
# all deit models have fine-tuned heads
a__ = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
a__ = 1_0_0_0
a__ = 'huggingface/label-files'
a__ = 'imagenet-1k-id2label.json'
a__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
a__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
a__ = idalabel
a__ = {v: k for k, v in idalabel.items()}
a__ = int(deit_name[-6:-4] )
a__ = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
a__ = 1_9_2
a__ = 7_6_8
a__ = 1_2
a__ = 3
elif deit_name[9:].startswith('small' ):
a__ = 3_8_4
a__ = 1_5_3_6
a__ = 1_2
a__ = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
a__ = 1_0_2_4
a__ = 4_0_9_6
a__ = 2_4
a__ = 1_6
# load original model from timm
a__ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
a__ = timm_model.state_dict()
a__ = create_rename_keys(__lowerCAmelCase , __lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
a__ = DeiTForImageClassificationWithTeacher(__lowerCAmelCase ).eval()
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
a__ = int(
(2_5_6 / 2_2_4) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
a__ = DeiTImageProcessor(size=__lowerCAmelCase , crop_size=config.image_size )
a__ = image_processor(images=prepare_img() , return_tensors='pt' )
a__ = encoding['pixel_values']
a__ = model(__lowerCAmelCase )
a__ = timm_model(__lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1E-3 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'Saving model {deit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
snake_case : List[str] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 657 |
from manim import *
class snake_case_ (lowerCamelCase_ ):
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
a__ = Rectangle(height=0.5 ,width=0.5 )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
a__ = Rectangle(height=0.25 ,width=0.25 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('CPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('GPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Model' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
a__ = []
a__ = []
for i, rect in enumerate(__snake_case ):
a__ = fill.copy().set_fill(__snake_case ,opacity=0.8 )
target.move_to(__snake_case )
model_arr.append(__snake_case )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__snake_case ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case ,*__snake_case )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Disk' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
disk.move_to([-4, -1.25, 0] )
self.add(__snake_case ,__snake_case )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case ,__snake_case )
a__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(__snake_case ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__snake_case )
a__ = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) )
a__ = Square(0.3 )
input.set_fill(__snake_case ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__snake_case ,buff=0.5 )
self.play(Write(__snake_case ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__snake_case ,buff=0.02 )
self.play(MoveToTarget(__snake_case ) )
self.play(FadeOut(__snake_case ) )
a__ = Arrow(start=__snake_case ,end=__snake_case ,color=__snake_case ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__snake_case ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a__ = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) )
a__ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__snake_case ) ,Circumscribe(model_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_cpu_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
a__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__snake_case ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
a__ = AnimationGroup(
FadeOut(__snake_case ,run_time=0.5 ) ,MoveToTarget(__snake_case ,run_time=0.5 ) ,FadeIn(__snake_case ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__snake_case )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a__ = 0.7
self.play(
Circumscribe(model_arr[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_arr[i + 1] ,color=__snake_case ,**__snake_case ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(cpu_left_col_base[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
a__ = a_c
a__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__snake_case ) ,FadeOut(__snake_case ,run_time=0.5 ) ,)
a__ = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) ,MoveToTarget(__snake_case ) )
self.wait()
| 657 | 1 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
snake_case : Any = pytest.mark.integration
@require_faiss
class snake_case_ (lowerCamelCase_ ):
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
a__ = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(__snake_case ) for x in np.arange(30 ).tolist()]} )
return dset
def lowerCamelCase__( self :Union[str, Any] ) -> Any:
import faiss
a__ = self._create_dummy_dataset()
a__ = dset.map(
lambda __snake_case ,__snake_case : {"vecs": i * np.ones(5 ,dtype=np.floataa )} ,with_indices=__snake_case ,keep_in_memory=__snake_case )
a__ = dset.add_faiss_index('vecs' ,batch_size=1_00 ,metric_type=faiss.METRIC_INNER_PRODUCT )
a__ , a__ = dset.get_nearest_examples('vecs' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
dset.drop_index('vecs' )
def lowerCamelCase__( self :Any ) -> str:
import faiss
a__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='vecs' ,batch_size=1_00 ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
a__ , a__ = dset.get_nearest_examples('vecs' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
def lowerCamelCase__( self :int ) -> List[Any]:
import faiss
a__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='vecs' ,metric_type=faiss.METRIC_INNER_PRODUCT ,)
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__snake_case ) as tmp_file:
dset.save_faiss_index('vecs' ,tmp_file.name )
dset.load_faiss_index('vecs2' ,tmp_file.name )
os.unlink(tmp_file.name )
a__ , a__ = dset.get_nearest_examples('vecs2' ,np.ones(5 ,dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
def lowerCamelCase__( self :Dict ) -> Optional[Any]:
a__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 ,1 ) ,index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(__snake_case ,partial(dset.get_nearest_examples ,'vecs2' ,np.ones(5 ,dtype=np.floataa ) ) )
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
from elasticsearch import Elasticsearch
a__ = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
a__ = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
a__ = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
a__ = Elasticsearch()
dset.add_elasticsearch_index('filename' ,es_client=__snake_case )
a__ , a__ = dset.get_nearest_examples('filename' ,'my_name-train_29' )
self.assertEqual(examples['filename'][0] ,'my_name-train_29' )
@require_faiss
class snake_case_ (lowerCamelCase_ ):
def lowerCamelCase__( self :str ) -> Union[str, Any]:
import faiss
a__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal ,5 )
index.add_vectors(np.zeros((5, 5) ,dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal ,10 )
# single query
a__ = np.zeros(5 ,dtype=np.floataa )
a__ = 1
a__ , a__ = index.search(__snake_case )
self.assertRaises(__snake_case ,index.search ,query.reshape(-1 ,1 ) )
self.assertGreater(scores[0] ,0 )
self.assertEqual(indices[0] ,1 )
# batched queries
a__ = np.eye(5 ,dtype=np.floataa )[::-1]
a__ , a__ = index.search_batch(__snake_case )
self.assertRaises(__snake_case ,index.search_batch ,queries[0] )
a__ = [scores[0] for scores in total_scores]
a__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__snake_case ) ,0 )
self.assertListEqual([4, 3, 2, 1, 0] ,__snake_case )
def lowerCamelCase__( self :Optional[int] ) -> Optional[Any]:
import faiss
a__ = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
a__ = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexLSH )
with self.assertRaises(__snake_case ):
a__ = FaissIndex(string_factory='Flat' ,custom_index=faiss.IndexFlat(5 ) )
def lowerCamelCase__( self :Any ) -> Union[str, Any]:
import faiss
a__ = faiss.IndexFlat(5 )
a__ = FaissIndex(custom_index=__snake_case )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index ,faiss.IndexFlat )
def lowerCamelCase__( self :int ) -> Optional[Any]:
import faiss
a__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__snake_case ) as tmp_file:
index.save(tmp_file.name )
a__ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
a__ = np.zeros(5 ,dtype=np.floataa )
a__ = 1
a__ , a__ = index.search(__snake_case )
self.assertGreater(scores[0] ,0 )
self.assertEqual(indices[0] ,1 )
@require_faiss
def __lowercase ( __lowerCAmelCase : Dict ):
import faiss
a__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
a__ = 'index.faiss'
a__ = F'mock://{index_name}'
index.save(__lowerCAmelCase , storage_options=mockfs.storage_options )
a__ = FaissIndex.load(__lowerCAmelCase , storage_options=mockfs.storage_options )
a__ = np.zeros(5 , dtype=np.floataa )
a__ = 1
a__ , a__ = index.search(__lowerCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class snake_case_ (lowerCamelCase_ ):
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
a__ = Elasticsearch()
a__ = {'acknowledged': True}
a__ = ElasticSearchIndex(es_client=__snake_case )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
a__ = 'foo'
a__ = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
a__ , a__ = index.search(__snake_case )
self.assertEqual(scores[0] ,1 )
self.assertEqual(indices[0] ,0 )
# single query with timeout
a__ = 'foo'
a__ = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
a__ , a__ = index.search(__snake_case ,request_timeout=30 )
self.assertEqual(scores[0] ,1 )
self.assertEqual(indices[0] ,0 )
# batched queries
a__ = ['foo', 'bar', 'foobar']
a__ = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
a__ , a__ = index.search_batch(__snake_case )
a__ = [scores[0] for scores in total_scores]
a__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__snake_case ) ,0 )
self.assertListEqual([1, 1, 1] ,__snake_case )
# batched queries with timeout
a__ = ['foo', 'bar', 'foobar']
a__ = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
a__ , a__ = index.search_batch(__snake_case ,request_timeout=30 )
a__ = [scores[0] for scores in total_scores]
a__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__snake_case ) ,0 )
self.assertListEqual([1, 1, 1] ,__snake_case )
| 657 |
from math import pi
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
| 657 | 1 |
from maths.prime_factors import prime_factors
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
a__ = F'Input value of [number={number}] must be an integer'
raise TypeError(__lowerCAmelCase )
if number < 1:
raise ValueError('Input must be a positive integer' )
return -1 if len(prime_factors(__lowerCAmelCase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 |
from math import sqrt
def __lowercase ( __lowerCAmelCase : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_1 ):
a__ = 0
a__ = 1
while count != nth and number < 3:
number += 1
if is_prime(__lowerCAmelCase ):
count += 1
while count != nth:
number += 2
if is_prime(__lowerCAmelCase ):
count += 1
return number
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 1 |
from ..utils import DummyObject, requires_backends
class snake_case_ (metaclass=lowerCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = ['''flax''', '''transformers''']
def __init__( self :Any ,*__snake_case :List[Any] ,**__snake_case :str ) -> Any:
requires_backends(self ,['flax', 'transformers'] )
@classmethod
def lowerCamelCase__( cls :List[str] ,*__snake_case :Optional[int] ,**__snake_case :str ) -> Dict:
requires_backends(cls ,['flax', 'transformers'] )
@classmethod
def lowerCamelCase__( cls :Any ,*__snake_case :Dict ,**__snake_case :Any ) -> Tuple:
requires_backends(cls ,['flax', 'transformers'] )
class snake_case_ (metaclass=lowerCamelCase_ ):
UpperCAmelCase__ : Optional[int] = ['''flax''', '''transformers''']
def __init__( self :Optional[Any] ,*__snake_case :str ,**__snake_case :List[str] ) -> Optional[Any]:
requires_backends(self ,['flax', 'transformers'] )
@classmethod
def lowerCamelCase__( cls :Union[str, Any] ,*__snake_case :Tuple ,**__snake_case :List[str] ) -> List[str]:
requires_backends(cls ,['flax', 'transformers'] )
@classmethod
def lowerCamelCase__( cls :Any ,*__snake_case :str ,**__snake_case :Optional[Any] ) -> Dict:
requires_backends(cls ,['flax', 'transformers'] )
class snake_case_ (metaclass=lowerCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = ['''flax''', '''transformers''']
def __init__( self :Tuple ,*__snake_case :str ,**__snake_case :Optional[int] ) -> Optional[Any]:
requires_backends(self ,['flax', 'transformers'] )
@classmethod
def lowerCamelCase__( cls :int ,*__snake_case :Optional[Any] ,**__snake_case :Any ) -> Tuple:
requires_backends(cls ,['flax', 'transformers'] )
@classmethod
def lowerCamelCase__( cls :Optional[int] ,*__snake_case :Dict ,**__snake_case :str ) -> int:
requires_backends(cls ,['flax', 'transformers'] )
class snake_case_ (metaclass=lowerCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = ['''flax''', '''transformers''']
def __init__( self :Union[str, Any] ,*__snake_case :List[str] ,**__snake_case :Union[str, Any] ) -> Dict:
requires_backends(self ,['flax', 'transformers'] )
@classmethod
def lowerCamelCase__( cls :Dict ,*__snake_case :Optional[Any] ,**__snake_case :Tuple ) -> Union[str, Any]:
requires_backends(cls ,['flax', 'transformers'] )
@classmethod
def lowerCamelCase__( cls :Any ,*__snake_case :Optional[Any] ,**__snake_case :Tuple ) -> Optional[int]:
requires_backends(cls ,['flax', 'transformers'] )
| 657 |
import unittest
from knapsack import greedy_knapsack as kp
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
a__ = [10, 20, 30, 40, 50, 60]
a__ = [2, 4, 6, 8, 10, 12]
a__ = 1_00
self.assertEqual(kp.calc_profit(__snake_case ,__snake_case ,__snake_case ) ,2_10 )
def lowerCamelCase__( self :str ) -> Optional[int]:
self.assertRaisesRegex(__snake_case ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :Optional[Any] ) -> int:
self.assertRaisesRegex(__snake_case ,'Weight can not be negative.' )
def lowerCamelCase__( self :str ) -> List[str]:
self.assertRaisesRegex(__snake_case ,'Profit can not be negative.' )
def lowerCamelCase__( self :str ) -> Optional[Any]:
self.assertRaisesRegex(__snake_case ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :int ) -> List[Any]:
self.assertRaisesRegex(
__snake_case ,'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 657 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Optional[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Any=1_0 ):
a__ = []
for _ in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=1_0 ):
a__ = []
for step in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = os.path.join(__lowerCAmelCase , 'schedule.bin' )
torch.save(scheduler.state_dict() , __lowerCAmelCase )
a__ = torch.load(__lowerCAmelCase )
scheduler.load_state_dict(__lowerCAmelCase )
return lrs
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :Union[str, Any] ) -> int:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> str:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = AdamW(params=[w] ,lr=2E-1 ,weight_decay=0.0 )
for _ in range(1_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
def lowerCamelCase__( self :Tuple ) -> int:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = Adafactor(
params=[w] ,lr=1E-2 ,eps=(1E-30, 1E-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,betaa=__snake_case ,weight_decay=0.0 ,relative_step=__snake_case ,scale_parameter=__snake_case ,warmup_init=__snake_case ,)
for _ in range(10_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
@require_torch
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : str = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
UpperCAmelCase__ : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
UpperCAmelCase__ : Optional[Any] = 1_0
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :Any=None ) -> Optional[Any]:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case ,msg=__snake_case )
def lowerCamelCase__( self :Tuple ) -> List[Any]:
a__ = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
a__ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
a__ , a__ = data
a__ = scheduler_func(self.optimizer ,**__snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 )
a__ = unwrap_schedule(__snake_case ,self.num_steps )
self.assertListAlmostEqual(
__snake_case ,__snake_case ,tol=1E-2 ,msg=F'failed for {scheduler_func} in normal scheduler' ,)
a__ = scheduler_func(self.optimizer ,**__snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__snake_case ) # wrap to test picklability of the schedule
a__ = unwrap_and_save_reload_schedule(__snake_case ,self.num_steps )
self.assertListEqual(__snake_case ,__snake_case ,msg=F'failed for {scheduler_func} in save and reload' )
class snake_case_ :
def __init__( self :Tuple ,__snake_case :str ) -> Any:
a__ = fn
def __call__( self :List[str] ,*__snake_case :Optional[Any] ,**__snake_case :Optional[int] ) -> Union[str, Any]:
return self.fn(*__snake_case ,**__snake_case )
@classmethod
def lowerCamelCase__( self :Tuple ,__snake_case :Union[str, Any] ) -> Dict:
a__ = list(map(self ,scheduler.lr_lambdas ) )
| 657 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case_ (unittest.TestCase ):
@slow
def lowerCamelCase__( self :List[Any] ) -> Union[str, Any]:
a__ = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' ,return_dict=__snake_case ).to(__snake_case )
a__ = AutoTokenizer.from_pretrained('google/mt5-small' )
a__ = tokenizer('Hello there' ,return_tensors='pt' ).input_ids
a__ = tokenizer('Hi I am' ,return_tensors='pt' ).input_ids
a__ = model(input_ids.to(__snake_case ) ,labels=labels.to(__snake_case ) ).loss
a__ = -(labels.shape[-1] * loss.item())
a__ = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 657 |
from __future__ import annotations
def __lowercase ( __lowerCAmelCase : list[int] ): # This function is recursive
a__ = len(__lowerCAmelCase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
a__ = array[0]
a__ = False
a__ = 1
a__ = []
while not is_found and i < array_length:
if array[i] < pivot:
a__ = True
a__ = [element for element in array[i:] if element >= array[i]]
a__ = longest_subsequence(__lowerCAmelCase )
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
a__ = temp_array
else:
i += 1
a__ = [element for element in array[1:] if element >= pivot]
a__ = [pivot, *longest_subsequence(__lowerCAmelCase )]
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
snake_case : Dict = '''
Human: <<task>>
Assistant: '''
snake_case : Optional[int] = '''huggingface-tools/default-prompts'''
snake_case : Tuple = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any="run" ):
if prompt_or_repo_id is None:
a__ = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , __lowerCAmelCase ) is not None:
return prompt_or_repo_id
a__ = cached_file(
__lowerCAmelCase , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(__lowerCAmelCase , 'r' , encoding='utf-8' ) as f:
return f.read()
| 657 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Dict = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = ['''pixel_values''']
def __init__( self :Optional[Any] ,__snake_case :bool = True ,__snake_case :int = 32 ,__snake_case :Union[str, Any]=PILImageResampling.BILINEAR ,__snake_case :bool = True ,**__snake_case :Tuple ,) -> None:
a__ = do_resize
a__ = do_rescale
a__ = size_divisor
a__ = resample
super().__init__(**__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :np.ndarray ,__snake_case :int ,__snake_case :Tuple ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :List[Any] ) -> np.ndarray:
a__ , a__ = get_image_size(__snake_case )
# Rounds the height and width down to the closest multiple of size_divisor
a__ = height // size_divisor * size_divisor
a__ = width // size_divisor * size_divisor
a__ = resize(__snake_case ,(new_h, new_w) ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
return image
def lowerCamelCase__( self :List[str] ,__snake_case :np.ndarray ,__snake_case :float ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :str ) -> np.ndarray:
return rescale(image=__snake_case ,scale=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :Tuple ,__snake_case :Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] ,__snake_case :Optional[bool] = None ,__snake_case :Optional[int] = None ,__snake_case :Union[str, Any]=None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[Union[TensorType, str]] = None ,__snake_case :ChannelDimension = ChannelDimension.FIRST ,**__snake_case :List[Any] ,) -> BatchFeature:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = size_divisor if size_divisor is not None else self.size_divisor
a__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
a__ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(__snake_case ) for img in images]
if do_resize:
a__ = [self.resize(__snake_case ,size_divisor=__snake_case ,resample=__snake_case ) for image in images]
if do_rescale:
a__ = [self.rescale(__snake_case ,scale=1 / 2_55 ) for image in images]
a__ = [to_channel_dimension_format(__snake_case ,__snake_case ) for image in images]
a__ = {'pixel_values': images}
return BatchFeature(data=__snake_case ,tensor_type=__snake_case )
| 657 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Dict = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = ['''pixel_values''']
def __init__( self :Optional[Any] ,__snake_case :bool = True ,__snake_case :int = 32 ,__snake_case :Union[str, Any]=PILImageResampling.BILINEAR ,__snake_case :bool = True ,**__snake_case :Tuple ,) -> None:
a__ = do_resize
a__ = do_rescale
a__ = size_divisor
a__ = resample
super().__init__(**__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :np.ndarray ,__snake_case :int ,__snake_case :Tuple ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :List[Any] ) -> np.ndarray:
a__ , a__ = get_image_size(__snake_case )
# Rounds the height and width down to the closest multiple of size_divisor
a__ = height // size_divisor * size_divisor
a__ = width // size_divisor * size_divisor
a__ = resize(__snake_case ,(new_h, new_w) ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
return image
def lowerCamelCase__( self :List[str] ,__snake_case :np.ndarray ,__snake_case :float ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :str ) -> np.ndarray:
return rescale(image=__snake_case ,scale=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :Tuple ,__snake_case :Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] ,__snake_case :Optional[bool] = None ,__snake_case :Optional[int] = None ,__snake_case :Union[str, Any]=None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[Union[TensorType, str]] = None ,__snake_case :ChannelDimension = ChannelDimension.FIRST ,**__snake_case :List[Any] ,) -> BatchFeature:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = size_divisor if size_divisor is not None else self.size_divisor
a__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
a__ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(__snake_case ) for img in images]
if do_resize:
a__ = [self.resize(__snake_case ,size_divisor=__snake_case ,resample=__snake_case ) for image in images]
if do_rescale:
a__ = [self.rescale(__snake_case ,scale=1 / 2_55 ) for image in images]
a__ = [to_channel_dimension_format(__snake_case ,__snake_case ) for image in images]
a__ = {'pixel_values': images}
return BatchFeature(data=__snake_case ,tensor_type=__snake_case )
| 657 |
def __lowercase ( __lowerCAmelCase : int ):
a__ = generate_pascal_triangle(__lowerCAmelCase )
for row_idx in range(__lowerCAmelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = []
for current_row_idx in range(__lowerCAmelCase ):
a__ = populate_current_row(__lowerCAmelCase , __lowerCAmelCase )
triangle.append(__lowerCAmelCase )
return triangle
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : int ):
a__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
a__ , a__ = 1, 1
for current_col_idx in range(1 , __lowerCAmelCase ):
calculate_current_element(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return current_row
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
a__ = triangle[current_row_idx - 1][current_col_idx - 1]
a__ = triangle[current_row_idx - 1][current_col_idx]
a__ = above_to_left_elt + above_to_right_elt
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = [[1]]
for row_index in range(1 , __lowerCAmelCase ):
a__ = [0] + result[-1] + [0]
a__ = row_index + 1
# Calculate the number of distinct elements in a row
a__ = sum(divmod(__lowerCAmelCase , 2 ) )
a__ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
a__ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
a__ = row_first_half + row_second_half
result.append(__lowerCAmelCase )
return result
def __lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__lowerCAmelCase : Callable , __lowerCAmelCase : int ) -> None:
a__ = F'{func.__name__}({value})'
a__ = timeit(F'__main__.{call}' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'{call:38} -- {timing:.4f} seconds' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__lowerCAmelCase , __lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 657 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] ):
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ):
a__ = tmp_path / 'cache'
a__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a__ = ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ).read()
_check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any ):
a__ = tmp_path / 'cache'
a__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a__ = features.copy() if features else default_expected_features
a__ = (
Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
a__ = ParquetDatasetReader(__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[int] ):
a__ = tmp_path / 'cache'
a__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a__ = ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase , split=__lowerCAmelCase ).read()
_check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str ):
if issubclass(__lowerCAmelCase , __lowerCAmelCase ):
a__ = parquet_path
elif issubclass(__lowerCAmelCase , __lowerCAmelCase ):
a__ = [parquet_path]
a__ = tmp_path / 'cache'
a__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a__ = ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_parquet_dataset(__lowerCAmelCase , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any]=("train",) ):
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
for split in splits:
a__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ):
a__ = tmp_path / 'cache'
a__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a__ = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase ).read()
_check_parquet_datasetdict(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] ):
a__ = tmp_path / 'cache'
a__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a__ = features.copy() if features else default_expected_features
a__ = (
Features({feature: Value(__lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
a__ = ParquetDatasetReader({'train': parquet_path} , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_parquet_datasetdict(__lowerCAmelCase , __lowerCAmelCase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
if split:
a__ = {split: parquet_path}
else:
a__ = 'train'
a__ = {'train': parquet_path, 'test': parquet_path}
a__ = tmp_path / 'cache'
a__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
a__ = ParquetDatasetReader(__lowerCAmelCase , cache_dir=__lowerCAmelCase ).read()
_check_parquet_datasetdict(__lowerCAmelCase , __lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ):
a__ = ParquetDatasetWriter(__lowerCAmelCase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
a__ = pq.ParquetFile(tmp_path / 'foo.parquet' )
a__ = pf.read()
assert dataset.data.table == output_table
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ):
a__ = str(shared_datadir / 'test_image_rgb.jpg' )
a__ = {'image': [image_path]}
a__ = Features({'image': Image()} )
a__ = Dataset.from_dict(__lowerCAmelCase , features=__lowerCAmelCase )
a__ = ParquetDatasetWriter(__lowerCAmelCase , tmp_path / 'foo.parquet' )
assert writer.write() > 0
a__ = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
a__ = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=__lowerCAmelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def __lowercase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ):
assert get_writer_batch_size(__lowerCAmelCase ) == expected
| 657 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
snake_case : str = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
snake_case : Tuple = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
snake_case : str = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
snake_case : Tuple = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
snake_case : int = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def __lowercase ( ):
a__ , a__ = randrange(len(__lowerCAmelCase ) ), randrange(len(__lowerCAmelCase ) )
a__ = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
a__ , a__ = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowercase ( __lowerCAmelCase : int = 1_0_0 ):
return (generate_random_hand() for _ in range(__lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
assert PokerHand(__lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
a__ = PokerHand(__lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
def __lowercase ( ):
a__ = [PokerHand(__lowerCAmelCase ) for hand in SORTED_HANDS]
a__ = poker_hands.copy()
shuffle(__lowerCAmelCase )
a__ = chain(sorted(__lowerCAmelCase ) )
for index, hand in enumerate(__lowerCAmelCase ):
assert hand == poker_hands[index]
def __lowercase ( ):
# Test that five high straights are compared correctly.
a__ = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=__lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowercase ( ):
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
a__ = PokerHand('2C 4S AS 3D 5C' )
a__ = True
a__ = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowercase ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
a__ = 0
a__ = os.path.abspath(os.path.dirname(__lowerCAmelCase ) )
a__ = os.path.join(__lowerCAmelCase , 'poker_hands.txt' )
with open(__lowerCAmelCase ) as file_hand:
for line in file_hand:
a__ = line[:1_4].strip()
a__ = line[1_5:].strip()
a__ , a__ = PokerHand(__lowerCAmelCase ), PokerHand(__lowerCAmelCase )
a__ = player.compare_with(__lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 657 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : int = logging.get_logger(__name__)
snake_case : Dict = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = '''vivit'''
def __init__( self :Union[str, Any] ,__snake_case :Dict=2_24 ,__snake_case :Any=32 ,__snake_case :Any=[2, 16, 16] ,__snake_case :Optional[int]=3 ,__snake_case :Dict=7_68 ,__snake_case :Any=12 ,__snake_case :int=12 ,__snake_case :Tuple=30_72 ,__snake_case :Optional[Any]="gelu_fast" ,__snake_case :Dict=0.0 ,__snake_case :Union[str, Any]=0.0 ,__snake_case :str=0.02 ,__snake_case :Any=1E-06 ,__snake_case :Any=True ,**__snake_case :Optional[Any] ,) -> List[Any]:
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = initializer_range
a__ = layer_norm_eps
a__ = image_size
a__ = num_frames
a__ = tubelet_size
a__ = num_channels
a__ = qkv_bias
super().__init__(**__snake_case )
| 657 |
def __lowercase ( __lowerCAmelCase : int ):
if length <= 0 or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(__lowerCAmelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 657 | 1 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
snake_case : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def __lowercase ( __lowerCAmelCase : Union[List, PIL.Image.Image, torch.Tensor] ):
warnings.warn(
'The preprocess method is deprecated and will be removed in a future version. Please'
' use VaeImageProcessor.preprocess instead' , __lowerCAmelCase , )
if isinstance(__lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(__lowerCAmelCase , PIL.Image.Image ):
a__ = [image]
if isinstance(image[0] , PIL.Image.Image ):
a__ , a__ = image[0].size
a__ , a__ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
a__ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
a__ = np.concatenate(__lowerCAmelCase , axis=0 )
a__ = np.array(__lowerCAmelCase ).astype(np.floataa ) / 255.0
a__ = image.transpose(0 , 3 , 1 , 2 )
a__ = 2.0 * image - 1.0
a__ = torch.from_numpy(__lowerCAmelCase )
elif isinstance(image[0] , torch.Tensor ):
a__ = torch.cat(__lowerCAmelCase , dim=0 )
return image
def __lowercase ( __lowerCAmelCase : Union[List, PIL.Image.Image, torch.Tensor] ):
if isinstance(__lowerCAmelCase , torch.Tensor ):
return mask
elif isinstance(__lowerCAmelCase , PIL.Image.Image ):
a__ = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
a__ , a__ = mask[0].size
a__ , a__ = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32
a__ = [np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
a__ = np.concatenate(__lowerCAmelCase , axis=0 )
a__ = mask.astype(np.floataa ) / 255.0
a__ = 0
a__ = 1
a__ = torch.from_numpy(__lowerCAmelCase )
elif isinstance(mask[0] , torch.Tensor ):
a__ = torch.cat(__lowerCAmelCase , dim=0 )
return mask
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : UNetaDModel
UpperCAmelCase__ : RePaintScheduler
def __init__( self :Tuple ,__snake_case :Dict ,__snake_case :Any ) -> Optional[Any]:
super().__init__()
self.register_modules(unet=__snake_case ,scheduler=__snake_case )
@torch.no_grad()
def __call__( self :List[str] ,__snake_case :Union[torch.Tensor, PIL.Image.Image] ,__snake_case :Union[torch.Tensor, PIL.Image.Image] ,__snake_case :int = 2_50 ,__snake_case :float = 0.0 ,__snake_case :int = 10 ,__snake_case :int = 10 ,__snake_case :Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__snake_case :Optional[str] = "pil" ,__snake_case :bool = True ,) -> Union[ImagePipelineOutput, Tuple]:
a__ = image
a__ = _preprocess_image(__snake_case )
a__ = original_image.to(device=self.device ,dtype=self.unet.dtype )
a__ = _preprocess_mask(__snake_case )
a__ = mask_image.to(device=self.device ,dtype=self.unet.dtype )
a__ = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__snake_case ,__snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(__snake_case )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
a__ = original_image.shape
a__ = randn_tensor(__snake_case ,generator=__snake_case ,device=self.device ,dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__snake_case ,__snake_case ,__snake_case ,self.device )
a__ = eta
a__ = self.scheduler.timesteps[0] + 1
a__ = generator[0] if isinstance(__snake_case ,__snake_case ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
a__ = self.unet(__snake_case ,__snake_case ).sample
# compute previous image: x_t -> x_t-1
a__ = self.scheduler.step(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
a__ = self.scheduler.undo_step(__snake_case ,__snake_case ,__snake_case )
a__ = t
a__ = (image / 2 + 0.5).clamp(0 ,1 )
a__ = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
a__ = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
| 657 |
def __lowercase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ):
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError('The length of profit and weight must be same.' )
if max_weight <= 0:
raise ValueError('max_weight must greater than zero.' )
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
a__ = [p / w for p, w in zip(__lowerCAmelCase , __lowerCAmelCase )]
# Creating a copy of the list and sorting profit/weight in ascending order
a__ = sorted(__lowerCAmelCase )
# declaring useful variables
a__ = len(__lowerCAmelCase )
a__ = 0
a__ = 0
a__ = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
a__ = sorted_profit_by_weight[length - i - 1]
a__ = profit_by_weight.index(__lowerCAmelCase )
a__ = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
snake_case : Tuple = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
snake_case : Optional[int] = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
snake_case : List[str] = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 657 | 1 |
from collections import namedtuple
snake_case : Optional[Any] = namedtuple('''from_to''', '''from_ to''')
snake_case : Optional[Any] = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.001, 10_00),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.00454, 264.172),
'''cubicyard''': from_to(0.76455, 1.30795),
'''cubicfoot''': from_to(0.028, 35.3147),
'''cup''': from_to(0.000236588, 4226.75),
}
def __lowercase ( __lowerCAmelCase : float , __lowerCAmelCase : str , __lowerCAmelCase : str ):
if from_type not in METRIC_CONVERSION:
raise ValueError(
F'Invalid \'from_type\' value: {from_type!r} Supported values are:\n'
+ ', '.join(__lowerCAmelCase ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
+ ', '.join(__lowerCAmelCase ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Optional[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class snake_case_ :
def __init__( self :int ,__snake_case :List[Any] ,__snake_case :Optional[Any]=13 ,__snake_case :Dict=7 ,__snake_case :Dict=True ,__snake_case :Tuple=True ,__snake_case :Tuple=True ,__snake_case :Optional[Any]=99 ,__snake_case :List[str]=32 ,__snake_case :Any=5 ,__snake_case :Optional[int]=4 ,__snake_case :int=37 ,__snake_case :int="gelu" ,__snake_case :Optional[Any]=0.1 ,__snake_case :Any=0.1 ,__snake_case :Union[str, Any]=5_12 ,__snake_case :str=16 ,__snake_case :str=2 ,__snake_case :int=0.02 ,__snake_case :Dict=3 ,__snake_case :Union[str, Any]=4 ,__snake_case :Dict=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_labels
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Union[str, Any] ) -> List[str]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ,__snake_case :str ,__snake_case :int ,__snake_case :List[Any] ,__snake_case :Dict ,*__snake_case :Dict ) -> List[Any]:
a__ = OpenAIGPTModel(config=__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
a__ = model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :List[Any] ,__snake_case :str ,*__snake_case :List[Any] ) -> Union[str, Any]:
a__ = OpenAIGPTLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Any ,__snake_case :Tuple ,__snake_case :Union[str, Any] ,__snake_case :List[Any] ,__snake_case :Tuple ,*__snake_case :Optional[int] ) -> List[str]:
a__ = OpenAIGPTDoubleHeadsModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Tuple ,__snake_case :Optional[Any] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :Dict ,*__snake_case :str ) -> Dict:
a__ = self.num_labels
a__ = OpenAIGPTForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def lowerCamelCase__( self :str ) -> str:
a__ = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) = config_and_inputs
a__ = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : List[str] = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
UpperCAmelCase__ : int = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
UpperCAmelCase__ : Optional[Any] = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCamelCase__( self :Optional[int] ,__snake_case :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :Dict ,__snake_case :Tuple ,__snake_case :List[str] ) -> List[Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def lowerCamelCase__( self :str ,__snake_case :Optional[int] ,__snake_case :Dict ,__snake_case :Any=False ) -> Optional[Any]:
a__ = super()._prepare_for_class(__snake_case ,__snake_case ,return_labels=__snake_case )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
a__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=__snake_case ,)
a__ = inputs_dict['labels']
a__ = inputs_dict['labels']
a__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=__snake_case ,)
a__ = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=__snake_case )
return inputs_dict
def lowerCamelCase__( self :Optional[Any] ) -> List[Any]:
a__ = OpenAIGPTModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,n_embd=37 )
def lowerCamelCase__( self :List[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :List[Any] ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*__snake_case )
def lowerCamelCase__( self :Optional[int] ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
def lowerCamelCase__( self :str ) -> List[str]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*__snake_case )
def lowerCamelCase__( self :int ) -> Optional[int]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*__snake_case )
@slow
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = OpenAIGPTModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class snake_case_ (unittest.TestCase ):
@slow
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[int]:
a__ = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(__snake_case )
a__ = torch.tensor([[4_81, 47_35, 5_44]] ,dtype=torch.long ,device=__snake_case ) # the president is
a__ = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
a__ = model.generate(__snake_case ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() ,__snake_case )
| 657 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_input_mask
a__ = use_labels
a__ = use_mc_token_ids
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
if self.use_mc_token_ids:
a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
a__ = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
a__ = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) = config_and_inputs
a__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
a__ = self.num_labels
a__ = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__( self :int ) -> List[str]:
a__ = CTRLModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,n_embd=37 )
def lowerCamelCase__( self :str ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
def lowerCamelCase__( self :int ) -> List[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__( self :Any ) -> Dict:
a__ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(__snake_case )
a__ = torch.tensor(
[[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=__snake_case ) # Legal the president is
a__ = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
a__ = model.generate(__snake_case ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() ,__snake_case )
| 657 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[Any] ) -> int:
if self.framework == "pytorch":
subprocess.run(
F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() ,encoding='utf-8' ,check=__snake_case ,)
assert hasattr(self ,'env' )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :List[Any]=1 ) -> Optional[Any]:
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F'{self.env.base_job_name}-single' ,instance_count=__snake_case ,instance_type=self.instance_type ,debugger_hook_config=__snake_case ,hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,py_version='py36' ,)
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[str] ) -> List[str]:
TrainingJobAnalytics(__snake_case ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' )
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
# create estimator
a__ = self.create_estimator()
# run training
estimator.fit()
# result dataframe
a__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
a__ = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
a__ = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
a__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'{estimator.latest_training_job.name}.json' ,'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,__snake_case )
| 657 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = 1
@register_to_config
def __init__( self :Optional[int] ,__snake_case :int = 10_00 ,__snake_case :Optional[Union[np.ndarray, List[float]]] = None ) -> int:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__snake_case )
# standard deviation of the initial noise distribution
a__ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
a__ = 4
# running values
a__ = []
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :int ,__snake_case :Union[str, torch.device] = None ) -> Union[str, Any]:
a__ = num_inference_steps
a__ = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
a__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
a__ = torch.tensor(self.config.trained_betas ,dtype=torch.floataa )
else:
a__ = torch.sin(steps * math.pi / 2 ) ** 2
a__ = (1.0 - self.betas**2) ** 0.5
a__ = (torch.atana(self.betas ,self.alphas ) / math.pi * 2)[:-1]
a__ = timesteps.to(__snake_case )
a__ = []
def lowerCamelCase__( self :Any ,__snake_case :torch.FloatTensor ,__snake_case :int ,__snake_case :torch.FloatTensor ,__snake_case :bool = True ,) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
a__ = (self.timesteps == timestep).nonzero().item()
a__ = timestep_index + 1
a__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__snake_case )
if len(self.ets ) == 1:
a__ = self.ets[-1]
elif len(self.ets ) == 2:
a__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
a__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
a__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
a__ = self._get_prev_sample(__snake_case ,__snake_case ,__snake_case ,__snake_case )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :torch.FloatTensor ,*__snake_case :int ,**__snake_case :Optional[int] ) -> torch.FloatTensor:
return sample
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[Any] ,__snake_case :Optional[int] ,__snake_case :Dict ,__snake_case :Any ) -> Optional[Any]:
a__ = self.alphas[timestep_index]
a__ = self.betas[timestep_index]
a__ = self.alphas[prev_timestep_index]
a__ = self.betas[prev_timestep_index]
a__ = (sample - sigma * ets) / max(__snake_case ,1E-8 )
a__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self :Any ) -> Union[str, Any]:
return self.config.num_train_timesteps
| 657 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> int:
a__ = inspect.getfile(accelerate.test_utils )
a__ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
a__ = test_metrics
@require_cpu
def lowerCamelCase__( self :int ) -> str:
debug_launcher(self.test_metrics.main ,num_processes=1 )
@require_cpu
def lowerCamelCase__( self :Union[str, Any] ) -> Optional[Any]:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def lowerCamelCase__( self :str ) -> Optional[Any]:
self.test_metrics.main()
@require_multi_gpu
def lowerCamelCase__( self :int ) -> Tuple:
print(F'Found {torch.cuda.device_count()} devices.' )
a__ = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__snake_case ,env=os.environ.copy() )
| 657 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case : Any = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = ['''MobileViTFeatureExtractor''']
snake_case : int = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Tuple = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
snake_case : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
snake_case : List[Any] = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : str = ['''input_features''']
def __init__( self :Tuple ,__snake_case :Tuple=80 ,__snake_case :Any=1_60_00 ,__snake_case :Any=1_60 ,__snake_case :List[Any]=30 ,__snake_case :Optional[Any]=4_00 ,__snake_case :Tuple=0.0 ,__snake_case :Any=False ,**__snake_case :Optional[Any] ,) -> Optional[int]:
super().__init__(
feature_size=__snake_case ,sampling_rate=__snake_case ,padding_value=__snake_case ,return_attention_mask=__snake_case ,**__snake_case ,)
a__ = n_fft
a__ = hop_length
a__ = chunk_length
a__ = chunk_length * sampling_rate
a__ = self.n_samples // hop_length
a__ = sampling_rate
a__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 ,num_mel_filters=__snake_case ,min_frequency=0.0 ,max_frequency=80_00.0 ,sampling_rate=__snake_case ,norm='slaney' ,mel_scale='slaney' ,)
def lowerCamelCase__( self :Tuple ,__snake_case :np.array ) -> np.ndarray:
a__ = spectrogram(
__snake_case ,window_function(self.n_fft ,'hann' ) ,frame_length=self.n_fft ,hop_length=self.hop_length ,power=2.0 ,mel_filters=self.mel_filters ,log_mel='log10' ,)
a__ = log_spec[:, :-1]
a__ = np.maximum(__snake_case ,log_spec.max() - 8.0 )
a__ = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowerCamelCase__( __snake_case :List[np.ndarray] ,__snake_case :List[np.ndarray] ,__snake_case :float = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
a__ = np.array(__snake_case ,np.intaa )
a__ = []
for vector, length in zip(__snake_case ,attention_mask.sum(-1 ) ):
a__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
a__ = padding_value
normed_input_values.append(__snake_case )
else:
a__ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self :Optional[int] ,__snake_case :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,__snake_case :bool = True ,__snake_case :Optional[int] = None ,__snake_case :Optional[Union[str, TensorType]] = None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[str] = "max_length" ,__snake_case :Optional[int] = None ,__snake_case :Optional[int] = None ,__snake_case :Optional[bool] = None ,**__snake_case :Dict ,) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
a__ = isinstance(__snake_case ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
a__ = is_batched_numpy or (
isinstance(__snake_case ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
a__ = [np.asarray([speech] ,dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__snake_case ,np.ndarray ):
a__ = np.asarray(__snake_case ,dtype=np.floataa )
elif isinstance(__snake_case ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
a__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
a__ = [np.asarray([raw_speech] ).T]
a__ = BatchFeature({'input_features': raw_speech} )
# convert into correct format for padding
a__ = self.pad(
__snake_case ,padding=__snake_case ,max_length=max_length if max_length else self.n_samples ,truncation=__snake_case ,pad_to_multiple_of=__snake_case ,return_attention_mask=return_attention_mask or do_normalize ,)
# zero-mean and unit-variance normalization
if do_normalize:
a__ = self.zero_mean_unit_var_norm(
padded_inputs['input_features'] ,attention_mask=padded_inputs['attention_mask'] ,padding_value=self.padding_value ,)
a__ = np.stack(padded_inputs['input_features'] ,axis=0 )
# make sure list is in array format
a__ = padded_inputs.get('input_features' ).transpose(2 ,0 ,1 )
a__ = [self._np_extract_fbank_features(__snake_case ) for waveform in input_features[0]]
if isinstance(input_features[0] ,__snake_case ):
a__ = [np.asarray(__snake_case ,dtype=np.floataa ) for feature in input_features]
else:
a__ = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
a__ = padded_inputs['attention_mask'][:, :: self.hop_length]
if return_tensors is not None:
a__ = padded_inputs.convert_to_tensors(__snake_case )
return padded_inputs
def lowerCamelCase__( self :List[Any] ) -> Dict[str, Any]:
a__ = copy.deepcopy(self.__dict__ )
a__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 657 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
snake_case : Dict = logging.get_logger(__name__)
snake_case : Any = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
if tokenizer_name is None:
a__ = TOKENIZER_CLASSES
else:
a__ = {tokenizer_name: getattr(__lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
for tokenizer_name in tokenizer_names:
a__ = TOKENIZER_CLASSES[tokenizer_name]
a__ = True
if checkpoint_name is None:
a__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
a__ = [checkpoint_name]
logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
for checkpoint in checkpoint_names:
logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
# Load tokenizer
a__ = tokenizer_class.from_pretrained(__lowerCAmelCase , force_download=__lowerCAmelCase )
# Save fast tokenizer
logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
# For organization names we create sub-directories
if "/" in checkpoint:
a__ , a__ = checkpoint.split('/' )
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
elif add_prefix:
a__ = checkpoint
a__ = dump_path
else:
a__ = None
a__ = dump_path
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
a__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
a__ = file_path.split(__lowerCAmelCase )[-1][0]
if next_char == "/":
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
a__ = None
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
a__ = tokenizer.save_pretrained(
__lowerCAmelCase , legacy_format=__lowerCAmelCase , filename_prefix=__lowerCAmelCase )
logger.info(F'=> File names {file_names}' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(__lowerCAmelCase )
logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
snake_case : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 657 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
snake_case : Optional[Any] = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Any = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Any = ['''LayoutLMv2FeatureExtractor''']
snake_case : Optional[Any] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
snake_case : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
from math import ceil, sqrt
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_0_0_0 ):
a__ = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
a__ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
a__ = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
snake_case : Any = logging.get_logger()
@dataclass
class snake_case_ :
UpperCAmelCase__ : nn.Module
UpperCAmelCase__ : List[nn.Module] = field(default_factory=lowerCamelCase_ )
UpperCAmelCase__ : list = field(default_factory=lowerCamelCase_ )
def lowerCamelCase__( self :Tuple ,__snake_case :str ,__snake_case :Tensor ,__snake_case :Tensor ) -> Any:
a__ = len(list(m.modules() ) ) == 1 or isinstance(__snake_case ,nn.Convad ) or isinstance(__snake_case ,nn.BatchNormad )
if has_not_submodules:
self.traced.append(__snake_case )
def __call__( self :str ,__snake_case :Tensor ) -> Tuple:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(__snake_case )
[x.remove() for x in self.handles]
return self
@property
def lowerCamelCase__( self :Optional[Any] ) -> Any:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda __snake_case : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
@dataclass
class snake_case_ :
UpperCAmelCase__ : nn.Module
UpperCAmelCase__ : nn.Module
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : List = field(default_factory=lowerCamelCase_ )
UpperCAmelCase__ : List = field(default_factory=lowerCamelCase_ )
def __call__( self :Union[str, Any] ,__snake_case :Tensor ) -> str:
a__ = Tracker(self.dest )(__snake_case ).parametrized
a__ = Tracker(self.src )(__snake_case ).parametrized
a__ = list(filter(lambda __snake_case : type(__snake_case ) not in self.src_skip ,__snake_case ) )
a__ = list(filter(lambda __snake_case : type(__snake_case ) not in self.dest_skip ,__snake_case ) )
if len(__snake_case ) != len(__snake_case ):
raise Exception(
F'Numbers of operations are different. Source module has {len(__snake_case )} operations while'
F' destination module has {len(__snake_case )}.' )
for dest_m, src_m in zip(__snake_case ,__snake_case ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'Transfered from={src_m} to={dest_m}' )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : ResNetConfig , __lowerCAmelCase : Path , __lowerCAmelCase : bool = True ):
print(F'Converting {name}...' )
with torch.no_grad():
a__ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase ).eval()
a__ = ResNetForImageClassification(__lowerCAmelCase ).eval()
a__ = ModuleTransfer(src=__lowerCAmelCase , dest=__lowerCAmelCase )
a__ = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(__lowerCAmelCase )
assert torch.allclose(from_model(__lowerCAmelCase ) , our_model(__lowerCAmelCase ).logits ), "The model logits don't match the original one."
a__ = F'resnet{"-".join(name.split("resnet" ) )}'
print(__lowerCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=__lowerCAmelCase , )
# we can use the convnext one
a__ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=__lowerCAmelCase , )
print(F'Pushed {checkpoint_name}' )
def __lowercase ( __lowerCAmelCase : Path , __lowerCAmelCase : str = None , __lowerCAmelCase : bool = True ):
a__ = 'imagenet-1k-id2label.json'
a__ = 1_0_0_0
a__ = (1, num_labels)
a__ = 'huggingface/label-files'
a__ = num_labels
a__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
a__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
a__ = idalabel
a__ = {v: k for k, v in idalabel.items()}
a__ = partial(__lowerCAmelCase , num_labels=__lowerCAmelCase , idalabel=__lowerCAmelCase , labelaid=__lowerCAmelCase )
a__ = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(__lowerCAmelCase , names_to_config[model_name] , __lowerCAmelCase , __lowerCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return config, expected_shape
if __name__ == "__main__":
snake_case : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
snake_case : List[str] = parser.parse_args()
snake_case : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 657 |
from sklearn.metrics import fa_score
import datasets
snake_case : Optional[int] = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
snake_case : List[Any] = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
snake_case : Union[str, Any] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
def lowerCamelCase__( self :Any ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,)
def lowerCamelCase__( self :Dict ,__snake_case :str ,__snake_case :str ,__snake_case :Dict=None ,__snake_case :str=1 ,__snake_case :Optional[int]="binary" ,__snake_case :Union[str, Any]=None ) -> Tuple:
a__ = fa_score(
__snake_case ,__snake_case ,labels=__snake_case ,pos_label=__snake_case ,average=__snake_case ,sample_weight=__snake_case )
return {"f1": float(__snake_case ) if score.size == 1 else score}
| 657 | 1 |
def __lowercase ( __lowerCAmelCase : int ):
if length <= 0 or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(__lowerCAmelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 657 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
snake_case : Any = logging.get_logger(__name__)
snake_case : Tuple = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class snake_case_ (lowerCamelCase_ ):
def __init__( self :str ,__snake_case :Dict=None ,__snake_case :int=None ,*__snake_case :str ,**__snake_case :Union[str, Any] ) -> Tuple:
super().__init__(*__snake_case ,**__snake_case )
if config is None:
assert isinstance(self.model ,__snake_case ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F' {self.model.__class__}'
)
a__ = self.model.config
else:
a__ = config
a__ = data_args
a__ = self.config.tgt_vocab_size if isinstance(self.config ,__snake_case ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
' padding..' )
if self.args.label_smoothing == 0:
a__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
a__ = label_smoothed_nll_loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :int ) -> Tuple:
if self.optimizer is None:
a__ = ['bias', 'LayerNorm.weight']
a__ = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
a__ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
a__ = Adafactor
a__ = {'scale_parameter': False, 'relative_step': False}
else:
a__ = AdamW
a__ = {
'betas': (self.args.adam_betaa, self.args.adam_betaa),
'eps': self.args.adam_epsilon,
}
a__ = self.args.learning_rate
if self.sharded_ddp:
a__ = OSS(
params=__snake_case ,optim=__snake_case ,**__snake_case ,)
else:
a__ = optimizer_cls(__snake_case ,**__snake_case )
if self.lr_scheduler is None:
a__ = self._get_lr_scheduler(__snake_case )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def lowerCamelCase__( self :Dict ,__snake_case :List[str] ) -> Union[str, Any]:
a__ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
a__ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
a__ = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
a__ = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=__snake_case )
return scheduler
def lowerCamelCase__( self :Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCamelCase__( self :str ,__snake_case :Optional[int] ,__snake_case :List[Any] ,__snake_case :Any ) -> Optional[Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute usual loss via models
a__ , a__ = model(**__snake_case ,labels=__snake_case ,use_cache=__snake_case )[:2]
else:
# compute label smoothed loss
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = torch.nn.functional.log_softmax(__snake_case ,dim=-1 )
a__ , a__ = self.loss_fn(__snake_case ,__snake_case ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :Optional[int] ) -> Any:
a__ = inputs.pop('labels' )
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
return loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :nn.Module ,__snake_case :Dict[str, Union[torch.Tensor, Any]] ,__snake_case :bool ,__snake_case :Optional[List[str]] = None ,) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
a__ = self._prepare_inputs(__snake_case )
a__ = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
a__ = self.model.generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**__snake_case ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
a__ = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
a__ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
a__ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
return (loss, logits, labels)
def lowerCamelCase__( self :List[str] ,__snake_case :Optional[Any] ,__snake_case :Union[str, Any] ) -> int:
# If PAD token is not defined at least EOS token has to be defined
a__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F' padded to `max_length`={max_length}' )
a__ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
a__ = tensor
return padded_tensor
| 657 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
snake_case : Optional[int] = logging.get_logger(__name__)
def __lowercase ( __lowerCAmelCase : int ):
if isinstance(__lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__lowerCAmelCase ):
return [[videos]]
raise ValueError(F'Could not make batched video from {videos}' )
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = ['''pixel_values''']
def __init__( self :Dict ,__snake_case :bool = True ,__snake_case :Dict[str, int] = None ,__snake_case :PILImageResampling = PILImageResampling.BILINEAR ,__snake_case :bool = True ,__snake_case :Dict[str, int] = None ,__snake_case :bool = True ,__snake_case :Union[int, float] = 1 / 2_55 ,__snake_case :bool = True ,__snake_case :bool = True ,__snake_case :Optional[Union[float, List[float]]] = None ,__snake_case :Optional[Union[float, List[float]]] = None ,**__snake_case :Optional[int] ,) -> None:
super().__init__(**__snake_case )
a__ = size if size is not None else {'shortest_edge': 2_56}
a__ = get_size_dict(__snake_case ,default_to_square=__snake_case )
a__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
a__ = get_size_dict(__snake_case ,param_name='crop_size' )
a__ = do_resize
a__ = size
a__ = do_center_crop
a__ = crop_size
a__ = resample
a__ = do_rescale
a__ = rescale_factor
a__ = offset
a__ = do_normalize
a__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__( self :Optional[Any] ,__snake_case :np.ndarray ,__snake_case :Dict[str, int] ,__snake_case :PILImageResampling = PILImageResampling.BILINEAR ,__snake_case :Optional[Union[str, ChannelDimension]] = None ,**__snake_case :str ,) -> np.ndarray:
a__ = get_size_dict(__snake_case ,default_to_square=__snake_case )
if "shortest_edge" in size:
a__ = get_resize_output_image_size(__snake_case ,size['shortest_edge'] ,default_to_square=__snake_case )
elif "height" in size and "width" in size:
a__ = (size['height'], size['width'])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(__snake_case ,size=__snake_case ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :np.ndarray ,__snake_case :Dict[str, int] ,__snake_case :Optional[Union[str, ChannelDimension]] = None ,**__snake_case :str ,) -> np.ndarray:
a__ = get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(__snake_case ,size=(size['height'], size['width']) ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :List[Any] ,__snake_case :np.ndarray ,__snake_case :Union[int, float] ,__snake_case :bool = True ,__snake_case :Optional[Union[str, ChannelDimension]] = None ,**__snake_case :str ,) -> Union[str, Any]:
a__ = image.astype(np.floataa )
if offset:
a__ = image - (scale / 2)
return rescale(__snake_case ,scale=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :List[Any] ,__snake_case :np.ndarray ,__snake_case :Union[float, List[float]] ,__snake_case :Union[float, List[float]] ,__snake_case :Optional[Union[str, ChannelDimension]] = None ,**__snake_case :Union[str, Any] ,) -> np.ndarray:
return normalize(__snake_case ,mean=__snake_case ,std=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :Optional[int] ,__snake_case :ImageInput ,__snake_case :bool = None ,__snake_case :Dict[str, int] = None ,__snake_case :PILImageResampling = None ,__snake_case :bool = None ,__snake_case :Dict[str, int] = None ,__snake_case :bool = None ,__snake_case :float = None ,__snake_case :bool = None ,__snake_case :bool = None ,__snake_case :Optional[Union[float, List[float]]] = None ,__snake_case :Optional[Union[float, List[float]]] = None ,__snake_case :Optional[ChannelDimension] = ChannelDimension.FIRST ,) -> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
a__ = to_numpy_array(__snake_case )
if do_resize:
a__ = self.resize(image=__snake_case ,size=__snake_case ,resample=__snake_case )
if do_center_crop:
a__ = self.center_crop(__snake_case ,size=__snake_case )
if do_rescale:
a__ = self.rescale(image=__snake_case ,scale=__snake_case ,offset=__snake_case )
if do_normalize:
a__ = self.normalize(image=__snake_case ,mean=__snake_case ,std=__snake_case )
a__ = to_channel_dimension_format(__snake_case ,__snake_case )
return image
def lowerCamelCase__( self :Any ,__snake_case :ImageInput ,__snake_case :bool = None ,__snake_case :Dict[str, int] = None ,__snake_case :PILImageResampling = None ,__snake_case :bool = None ,__snake_case :Dict[str, int] = None ,__snake_case :bool = None ,__snake_case :float = None ,__snake_case :bool = None ,__snake_case :bool = None ,__snake_case :Optional[Union[float, List[float]]] = None ,__snake_case :Optional[Union[float, List[float]]] = None ,__snake_case :Optional[Union[str, TensorType]] = None ,__snake_case :ChannelDimension = ChannelDimension.FIRST ,**__snake_case :Any ,) -> PIL.Image.Image:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = resample if resample is not None else self.resample
a__ = do_center_crop if do_center_crop is not None else self.do_center_crop
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = rescale_factor if rescale_factor is not None else self.rescale_factor
a__ = offset if offset is not None else self.offset
a__ = do_normalize if do_normalize is not None else self.do_normalize
a__ = image_mean if image_mean is not None else self.image_mean
a__ = image_std if image_std is not None else self.image_std
a__ = size if size is not None else self.size
a__ = get_size_dict(__snake_case ,default_to_square=__snake_case )
a__ = crop_size if crop_size is not None else self.crop_size
a__ = get_size_dict(__snake_case ,param_name='crop_size' )
if not valid_images(__snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
a__ = make_batched(__snake_case )
a__ = [
[
self._preprocess_image(
image=__snake_case ,do_resize=__snake_case ,size=__snake_case ,resample=__snake_case ,do_center_crop=__snake_case ,crop_size=__snake_case ,do_rescale=__snake_case ,rescale_factor=__snake_case ,offset=__snake_case ,do_normalize=__snake_case ,image_mean=__snake_case ,image_std=__snake_case ,data_format=__snake_case ,)
for img in video
]
for video in videos
]
a__ = {'pixel_values': videos}
return BatchFeature(data=__snake_case ,tensor_type=__snake_case )
| 657 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
snake_case : Dict = '''
Human: <<task>>
Assistant: '''
snake_case : Optional[int] = '''huggingface-tools/default-prompts'''
snake_case : Tuple = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any="run" ):
if prompt_or_repo_id is None:
a__ = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , __lowerCAmelCase ) is not None:
return prompt_or_repo_id
a__ = cached_file(
__lowerCAmelCase , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(__lowerCAmelCase , 'r' , encoding='utf-8' ) as f:
return f.read()
| 657 | 1 |
import functools
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : str ):
a__ = len(__lowerCAmelCase )
a__ = len(__lowerCAmelCase )
@functools.cache
def min_distance(__lowerCAmelCase : int , __lowerCAmelCase : int ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
a__ = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , __lowerCAmelCase ) , 1 + min_distance(__lowerCAmelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
a__ = precision
a__ = ceil(precision / 1_4 )
a__ = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
a__ = 1
a__ = 1_3_5_9_1_4_0_9
a__ = Decimal(__lowerCAmelCase )
for k in range(1 , __lowerCAmelCase ):
a__ = factorial(6 * k ) // (factorial(3 * k ) * factorial(__lowerCAmelCase ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
snake_case : Tuple = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 657 | 1 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
snake_case : List[str] = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
snake_case : Union[str, Any] = parser.parse_args()
if args.model_type == "bert":
snake_case : Any = BertForMaskedLM.from_pretrained(args.model_name)
snake_case : Optional[Any] = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
snake_case : Optional[Any] = model.state_dict()
snake_case : int = {}
for w in ["word_embeddings", "position_embeddings"]:
snake_case : List[Any] = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
snake_case : Any = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
snake_case : List[str] = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
snake_case : Union[str, Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
snake_case : str = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
snake_case : List[str] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
snake_case : Optional[int] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
snake_case : Union[str, Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
snake_case : Dict = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
snake_case : Any = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
snake_case : int = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
snake_case : int = state_dict['''cls.predictions.decoder.weight''']
snake_case : int = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
snake_case : Optional[int] = state_dict[f"""cls.predictions.transform.dense.{w}"""]
snake_case : Optional[Any] = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 657 |
def __lowercase ( __lowerCAmelCase : int = 2_0_0 ):
a__ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
a__ = [0] * (pence + 1)
a__ = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(__lowerCAmelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
| 657 | 1 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
snake_case : Dict = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
snake_case : Tuple = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
snake_case : List[str] = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def __lowercase ( __lowerCAmelCase : str ):
def remove_articles(__lowerCAmelCase : Optional[Any] ):
a__ = re.compile(R'\b(a|an|the)\b' , re.UNICODE )
return re.sub(__lowerCAmelCase , ' ' , __lowerCAmelCase )
def white_space_fix(__lowerCAmelCase : Tuple ):
return " ".join(text.split() )
def remove_punc(__lowerCAmelCase : int ):
a__ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCAmelCase : List[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : List[str] ):
return int(normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase ) )
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : Any ):
a__ = [any(compute_exact(__lowerCAmelCase , __lowerCAmelCase ) for ref in refs ) for pred, refs in zip(__lowerCAmelCase , __lowerCAmelCase )]
return (sum(__lowerCAmelCase ) / len(__lowerCAmelCase )) * 1_0_0
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] ):
a__ = [rgram for rgrams in rgramslist for rgram in rgrams]
a__ = Counter(__lowerCAmelCase )
a__ = Counter(__lowerCAmelCase )
a__ = Counter()
for sgram, scount in sgramcounter.items():
a__ = scount * numref
a__ = Counter(__lowerCAmelCase )
a__ = Counter()
for cgram, ccount in cgramcounter.items():
a__ = ccount * numref
# KEEP
a__ = sgramcounter_rep & cgramcounter_rep
a__ = keepgramcounter_rep & rgramcounter
a__ = sgramcounter_rep & rgramcounter
a__ = 0
a__ = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
a__ = 1
a__ = 1
if len(__lowerCAmelCase ) > 0:
a__ = keeptmpscorea / len(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
a__ = keeptmpscorea / sum(keepgramcounterall_rep.values() )
a__ = 0
if keepscore_precision > 0 or keepscore_recall > 0:
a__ = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
a__ = sgramcounter_rep - cgramcounter_rep
a__ = delgramcounter_rep - rgramcounter
a__ = sgramcounter_rep - rgramcounter
a__ = 0
a__ = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
a__ = 1
if len(__lowerCAmelCase ) > 0:
a__ = deltmpscorea / len(__lowerCAmelCase )
# ADDITION
a__ = set(__lowerCAmelCase ) - set(__lowerCAmelCase )
a__ = set(__lowerCAmelCase ) & set(__lowerCAmelCase )
a__ = set(__lowerCAmelCase ) - set(__lowerCAmelCase )
a__ = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
a__ = 1
a__ = 1
if len(__lowerCAmelCase ) > 0:
a__ = addtmpscore / len(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
a__ = addtmpscore / len(__lowerCAmelCase )
a__ = 0
if addscore_precision > 0 or addscore_recall > 0:
a__ = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ):
a__ = len(__lowerCAmelCase )
a__ = ssent.split(' ' )
a__ = csent.split(' ' )
a__ = []
a__ = []
a__ = []
a__ = []
a__ = []
a__ = []
a__ = []
a__ = []
a__ = []
a__ = []
for rsent in rsents:
a__ = rsent.split(' ' )
a__ = []
a__ = []
a__ = []
ragramslist.append(__lowerCAmelCase )
for i in range(0 , len(__lowerCAmelCase ) - 1 ):
if i < len(__lowerCAmelCase ) - 1:
a__ = ragrams[i] + ' ' + ragrams[i + 1]
ragrams.append(__lowerCAmelCase )
if i < len(__lowerCAmelCase ) - 2:
a__ = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2]
ragrams.append(__lowerCAmelCase )
if i < len(__lowerCAmelCase ) - 3:
a__ = ragrams[i] + ' ' + ragrams[i + 1] + ' ' + ragrams[i + 2] + ' ' + ragrams[i + 3]
ragrams.append(__lowerCAmelCase )
ragramslist.append(__lowerCAmelCase )
ragramslist.append(__lowerCAmelCase )
ragramslist.append(__lowerCAmelCase )
for i in range(0 , len(__lowerCAmelCase ) - 1 ):
if i < len(__lowerCAmelCase ) - 1:
a__ = sagrams[i] + ' ' + sagrams[i + 1]
sagrams.append(__lowerCAmelCase )
if i < len(__lowerCAmelCase ) - 2:
a__ = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2]
sagrams.append(__lowerCAmelCase )
if i < len(__lowerCAmelCase ) - 3:
a__ = sagrams[i] + ' ' + sagrams[i + 1] + ' ' + sagrams[i + 2] + ' ' + sagrams[i + 3]
sagrams.append(__lowerCAmelCase )
for i in range(0 , len(__lowerCAmelCase ) - 1 ):
if i < len(__lowerCAmelCase ) - 1:
a__ = cagrams[i] + ' ' + cagrams[i + 1]
cagrams.append(__lowerCAmelCase )
if i < len(__lowerCAmelCase ) - 2:
a__ = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2]
cagrams.append(__lowerCAmelCase )
if i < len(__lowerCAmelCase ) - 3:
a__ = cagrams[i] + ' ' + cagrams[i + 1] + ' ' + cagrams[i + 2] + ' ' + cagrams[i + 3]
cagrams.append(__lowerCAmelCase )
((a__) , (a__) , (a__)) = SARIngram(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
((a__) , (a__) , (a__)) = SARIngram(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
((a__) , (a__) , (a__)) = SARIngram(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
((a__) , (a__) , (a__)) = SARIngram(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
a__ = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
a__ = sum([delascore, delascore, delascore, delascore] ) / 4
a__ = sum([addascore, addascore, addascore, addascore] ) / 4
a__ = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : bool = True , __lowerCAmelCase : str = "13a" , __lowerCAmelCase : bool = True ):
# Normalization is requried for the ASSET dataset (one of the primary
# datasets in sentence simplification) to allow using space
# to split the sentence. Even though Wiki-Auto and TURK datasets,
# do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
a__ = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
a__ = sacrebleu.metrics.bleu._get_tokenizer(__lowerCAmelCase )()(__lowerCAmelCase )
else:
a__ = sacrebleu.TOKENIZERS[tokenizer]()(__lowerCAmelCase )
elif tokenizer == "moses":
a__ = sacremoses.MosesTokenizer().tokenize(__lowerCAmelCase , return_str=__lowerCAmelCase , escape=__lowerCAmelCase )
elif tokenizer == "penn":
a__ = sacremoses.MosesTokenizer().penn_tokenize(__lowerCAmelCase , return_str=__lowerCAmelCase )
else:
a__ = sentence
if not return_str:
a__ = normalized_sent.split()
return normalized_sent
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ):
if not (len(__lowerCAmelCase ) == len(__lowerCAmelCase ) == len(__lowerCAmelCase )):
raise ValueError('Sources length must match predictions and references lengths.' )
a__ = 0
for src, pred, refs in zip(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
sari_score += SARIsent(normalize(__lowerCAmelCase ) , normalize(__lowerCAmelCase ) , [normalize(__lowerCAmelCase ) for sent in refs] )
a__ = sari_score / len(__lowerCAmelCase )
return 1_0_0 * sari_score
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any]="exp" , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Dict=False , __lowerCAmelCase : str=False , __lowerCAmelCase : Union[str, Any]=False , ):
a__ = len(references[0] )
if any(len(__lowerCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
a__ = [[refs[i] for refs in references] for i in range(__lowerCAmelCase )]
a__ = sacrebleu.corpus_bleu(
__lowerCAmelCase , __lowerCAmelCase , smooth_method=__lowerCAmelCase , smooth_value=__lowerCAmelCase , force=__lowerCAmelCase , lowercase=__lowerCAmelCase , use_effective_order=__lowerCAmelCase , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
def lowerCamelCase__( self :List[Any] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' ,id='sequence' ) ,id='references' ),
} ) ,codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
] ,reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] ,)
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :List[Any] ,__snake_case :List[str] ,__snake_case :Any ) -> Optional[int]:
a__ = {}
result.update({'sari': compute_sari(sources=__snake_case ,predictions=__snake_case ,references=__snake_case )} )
result.update({'sacrebleu': compute_sacrebleu(predictions=__snake_case ,references=__snake_case )} )
result.update({'exact': compute_em(predictions=__snake_case ,references=__snake_case )} )
return result
| 657 |
from manim import *
class snake_case_ (lowerCamelCase_ ):
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
a__ = Rectangle(height=0.5 ,width=0.5 )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
a__ = Rectangle(height=0.25 ,width=0.25 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('CPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('GPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Model' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
a__ = []
a__ = []
for i, rect in enumerate(__snake_case ):
a__ = fill.copy().set_fill(__snake_case ,opacity=0.8 )
target.move_to(__snake_case )
model_arr.append(__snake_case )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__snake_case ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case ,*__snake_case )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Disk' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
disk.move_to([-4, -1.25, 0] )
self.add(__snake_case ,__snake_case )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case ,__snake_case )
a__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(__snake_case ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__snake_case )
a__ = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) )
a__ = Square(0.3 )
input.set_fill(__snake_case ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__snake_case ,buff=0.5 )
self.play(Write(__snake_case ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__snake_case ,buff=0.02 )
self.play(MoveToTarget(__snake_case ) )
self.play(FadeOut(__snake_case ) )
a__ = Arrow(start=__snake_case ,end=__snake_case ,color=__snake_case ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__snake_case ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a__ = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) )
a__ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__snake_case ) ,Circumscribe(model_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_cpu_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
a__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__snake_case ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
a__ = AnimationGroup(
FadeOut(__snake_case ,run_time=0.5 ) ,MoveToTarget(__snake_case ,run_time=0.5 ) ,FadeIn(__snake_case ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__snake_case )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a__ = 0.7
self.play(
Circumscribe(model_arr[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_arr[i + 1] ,color=__snake_case ,**__snake_case ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(cpu_left_col_base[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
a__ = a_c
a__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__snake_case ) ,FadeOut(__snake_case ,run_time=0.5 ) ,)
a__ = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) ,MoveToTarget(__snake_case ) )
self.wait()
| 657 | 1 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 657 |
from math import pi
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
| 657 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
snake_case : Dict = logging.get_logger(__name__)
snake_case : int = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Optional[int] = '''van'''
def __init__( self :List[Any] ,__snake_case :Optional[int]=2_24 ,__snake_case :Any=3 ,__snake_case :List[Any]=[7, 3, 3, 3] ,__snake_case :Tuple=[4, 2, 2, 2] ,__snake_case :Optional[int]=[64, 1_28, 3_20, 5_12] ,__snake_case :Optional[int]=[3, 3, 12, 3] ,__snake_case :Any=[8, 8, 4, 4] ,__snake_case :int="gelu" ,__snake_case :Tuple=0.02 ,__snake_case :Optional[int]=1E-6 ,__snake_case :Any=1E-2 ,__snake_case :Optional[int]=0.0 ,__snake_case :Tuple=0.0 ,**__snake_case :List[Any] ,) -> List[str]:
super().__init__(**__snake_case )
a__ = image_size
a__ = num_channels
a__ = patch_sizes
a__ = strides
a__ = hidden_sizes
a__ = depths
a__ = mlp_ratios
a__ = hidden_act
a__ = initializer_range
a__ = layer_norm_eps
a__ = layer_scale_init_value
a__ = drop_path_rate
a__ = dropout_rate
| 657 |
from math import sqrt
def __lowercase ( __lowerCAmelCase : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_1 ):
a__ = 0
a__ = 1
while count != nth and number < 3:
number += 1
if is_prime(__lowerCAmelCase ):
count += 1
while count != nth:
number += 2
if is_prime(__lowerCAmelCase ):
count += 1
return number
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 1 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class snake_case_ :
def __init__( self :Dict ,__snake_case :Optional[Any] ,) -> Tuple:
a__ = parent
a__ = 13
a__ = 7
a__ = 30
a__ = self.seq_length + self.mem_len
a__ = 15
a__ = True
a__ = True
a__ = 99
a__ = [10, 50, 80]
a__ = 32
a__ = 32
a__ = 4
a__ = 8
a__ = 1_28
a__ = 2
a__ = 2
a__ = None
a__ = 1
a__ = 0
a__ = 3
a__ = self.vocab_size - 1
a__ = 0.01
def lowerCamelCase__( self :Union[str, Any] ) -> Any:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = TransfoXLConfig(
vocab_size=self.vocab_size ,mem_len=self.mem_len ,clamp_len=self.clamp_len ,cutoffs=self.cutoffs ,d_model=self.hidden_size ,d_embed=self.d_embed ,n_head=self.num_attention_heads ,d_head=self.d_head ,d_inner=self.d_inner ,div_val=self.div_val ,n_layer=self.num_hidden_layers ,eos_token_id=self.eos_token_id ,pad_token_id=self.vocab_size - 1 ,init_range=self.init_range ,num_labels=self.num_labels ,)
return (config, input_ids_a, input_ids_a, lm_labels)
def lowerCamelCase__( self :Any ) -> Tuple:
random.seed(self.seed )
tf.random.set_seed(self.seed )
def lowerCamelCase__( self :Dict ,__snake_case :int ,__snake_case :List[Any] ,__snake_case :List[Any] ,__snake_case :Optional[int] ) -> Optional[int]:
a__ = TFTransfoXLModel(__snake_case )
a__ , a__ = model(__snake_case ).to_tuple()
a__ = {'input_ids': input_ids_a, 'mems': mems_a}
a__ , a__ = model(__snake_case ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
def lowerCamelCase__( self :str ,__snake_case :Optional[Any] ,__snake_case :Any ,__snake_case :List[Any] ,__snake_case :Optional[Any] ) -> Tuple:
a__ = TFTransfoXLLMHeadModel(__snake_case )
a__ , a__ = model(__snake_case ).to_tuple()
a__ = {'input_ids': input_ids_a, 'labels': lm_labels}
a__ , a__ = model(__snake_case ).to_tuple()
a__ , a__ = model([input_ids_a, mems_a] ).to_tuple()
a__ = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels}
a__ , a__ = model(__snake_case ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
self.parent.assertEqual(lm_logits_a.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] ,[(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers ,)
def lowerCamelCase__( self :Tuple ,__snake_case :Tuple ,__snake_case :Dict ,__snake_case :str ,__snake_case :List[Any] ) -> int:
a__ = TFTransfoXLForSequenceClassification(__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def lowerCamelCase__( self :str ) -> Any:
a__ = self.prepare_config_and_inputs()
((a__) , (a__) , (a__) , (a__)) = config_and_inputs
a__ = {'input_ids': input_ids_a}
return config, inputs_dict
@require_tf
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Any = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
UpperCAmelCase__ : str = () if is_tf_available() else ()
UpperCAmelCase__ : Dict = (
{
'''feature-extraction''': TFTransfoXLModel,
'''text-classification''': TFTransfoXLForSequenceClassification,
'''text-generation''': TFTransfoXLLMHeadModel,
'''zero-shot''': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
UpperCAmelCase__ : str = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : List[Any] = False
def lowerCamelCase__( self :Dict ,__snake_case :Union[str, Any] ,__snake_case :List[str] ,__snake_case :int ,__snake_case :str ,__snake_case :List[Any] ) -> Optional[Any]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def lowerCamelCase__( self :Any ) -> Optional[Any]:
a__ = TFTransfoXLModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,d_embed=37 )
def lowerCamelCase__( self :str ) -> List[str]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :Dict ) -> Any:
self.model_tester.set_seed()
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*__snake_case )
def lowerCamelCase__( self :int ) -> List[Any]:
self.model_tester.set_seed()
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*__snake_case )
def lowerCamelCase__( self :Any ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__snake_case )
def lowerCamelCase__( self :Tuple ) -> Tuple:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
a__ = model_class(__snake_case )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
a__ = model.get_output_embeddings()
assert isinstance(__snake_case ,tf.keras.layers.Layer )
a__ = model.get_bias()
assert name is None
else:
a__ = model.get_output_embeddings()
assert x is None
a__ = model.get_bias()
assert name is None
def lowerCamelCase__( self :str ) -> Tuple:
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def lowerCamelCase__( self :Optional[Any] ) -> List[Any]:
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = TFTransfoXLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
def lowerCamelCase__( self :str ) -> Optional[int]:
pass
@require_tf
class snake_case_ (unittest.TestCase ):
@unittest.skip('Skip test until #12651 is resolved.' )
@slow
def lowerCamelCase__( self :Any ) -> int:
a__ = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
# fmt: off
a__ = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] ,dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
a__ = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
a__ = model.generate(__snake_case ,max_length=2_00 ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].numpy().tolist() ,__snake_case )
| 657 |
import unittest
from knapsack import greedy_knapsack as kp
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
a__ = [10, 20, 30, 40, 50, 60]
a__ = [2, 4, 6, 8, 10, 12]
a__ = 1_00
self.assertEqual(kp.calc_profit(__snake_case ,__snake_case ,__snake_case ) ,2_10 )
def lowerCamelCase__( self :str ) -> Optional[int]:
self.assertRaisesRegex(__snake_case ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :Optional[Any] ) -> int:
self.assertRaisesRegex(__snake_case ,'Weight can not be negative.' )
def lowerCamelCase__( self :str ) -> List[str]:
self.assertRaisesRegex(__snake_case ,'Profit can not be negative.' )
def lowerCamelCase__( self :str ) -> Optional[Any]:
self.assertRaisesRegex(__snake_case ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :int ) -> List[Any]:
self.assertRaisesRegex(
__snake_case ,'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 657 | 1 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
snake_case : Optional[Any] = logging.get_logger(__name__)
def __lowercase ( __lowerCAmelCase : nn.ModuleList , __lowerCAmelCase : nn.ModuleList , __lowerCAmelCase : List[int] ):
a__ = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ), F'{len(__lowerCAmelCase )} != {len(__lowerCAmelCase )}'
dest_layers.load_state_dict(layers_to_copy.state_dict() )
snake_case : Any = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
snake_case : List[Any] = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ):
try:
a__ = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
F' {n_student}' )
return list(range(__lowerCAmelCase ) )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ):
if n_student > n_teacher:
raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' )
elif n_teacher == n_student:
return list(range(__lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __lowercase ( __lowerCAmelCase : Union[str, PreTrainedModel] , __lowerCAmelCase : Union[str, Path] = "student" , __lowerCAmelCase : Union[int, None] = None , __lowerCAmelCase : Union[int, None] = None , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : Any , ):
a__ = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
AutoTokenizer.from_pretrained(__lowerCAmelCase ).save_pretrained(__lowerCAmelCase ) # purely for convenience
a__ = AutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase ).eval()
else:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), F'teacher must be a model or string got type {type(__lowerCAmelCase )}'
a__ = teacher.config.to_diff_dict()
try:
a__ , a__ = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
a__ = teacher_e
if d is None:
a__ = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
a__ , a__ = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
a__ , a__ = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
a__ = teacher_e
if d is None:
a__ = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__lowerCAmelCase )
# Copy weights
a__ = teacher.config_class(**__lowerCAmelCase )
a__ = AutoModelForSeqaSeqLM.from_config(__lowerCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
a__ = student.load_state_dict(teacher.state_dict() , strict=__lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
a__ , a__ = list(range(__lowerCAmelCase ) ), list(range(__lowerCAmelCase ) )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
F' {save_path}' )
student.save_pretrained(__lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
a__ = pick_layers_to_copy(__lowerCAmelCase , __lowerCAmelCase )
if d_layers_to_copy is None:
a__ = pick_layers_to_copy(__lowerCAmelCase , __lowerCAmelCase )
try:
if hasattr(
__lowerCAmelCase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , __lowerCAmelCase )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
a__ = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(__lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 657 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Any=1_0 ):
a__ = []
for _ in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=1_0 ):
a__ = []
for step in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = os.path.join(__lowerCAmelCase , 'schedule.bin' )
torch.save(scheduler.state_dict() , __lowerCAmelCase )
a__ = torch.load(__lowerCAmelCase )
scheduler.load_state_dict(__lowerCAmelCase )
return lrs
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :Union[str, Any] ) -> int:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> str:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = AdamW(params=[w] ,lr=2E-1 ,weight_decay=0.0 )
for _ in range(1_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
def lowerCamelCase__( self :Tuple ) -> int:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = Adafactor(
params=[w] ,lr=1E-2 ,eps=(1E-30, 1E-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,betaa=__snake_case ,weight_decay=0.0 ,relative_step=__snake_case ,scale_parameter=__snake_case ,warmup_init=__snake_case ,)
for _ in range(10_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
@require_torch
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : str = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
UpperCAmelCase__ : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
UpperCAmelCase__ : Optional[Any] = 1_0
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :Any=None ) -> Optional[Any]:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case ,msg=__snake_case )
def lowerCamelCase__( self :Tuple ) -> List[Any]:
a__ = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
a__ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
a__ , a__ = data
a__ = scheduler_func(self.optimizer ,**__snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 )
a__ = unwrap_schedule(__snake_case ,self.num_steps )
self.assertListAlmostEqual(
__snake_case ,__snake_case ,tol=1E-2 ,msg=F'failed for {scheduler_func} in normal scheduler' ,)
a__ = scheduler_func(self.optimizer ,**__snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__snake_case ) # wrap to test picklability of the schedule
a__ = unwrap_and_save_reload_schedule(__snake_case ,self.num_steps )
self.assertListEqual(__snake_case ,__snake_case ,msg=F'failed for {scheduler_func} in save and reload' )
class snake_case_ :
def __init__( self :Tuple ,__snake_case :str ) -> Any:
a__ = fn
def __call__( self :List[str] ,*__snake_case :Optional[Any] ,**__snake_case :Optional[int] ) -> Union[str, Any]:
return self.fn(*__snake_case ,**__snake_case )
@classmethod
def lowerCamelCase__( self :Tuple ,__snake_case :Union[str, Any] ) -> Dict:
a__ = list(map(self ,scheduler.lr_lambdas ) )
| 657 | 1 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case_ (unittest.TestCase ):
def __init__( self :Optional[Any] ,__snake_case :List[Any] ,__snake_case :Tuple=13 ,__snake_case :str=7 ,__snake_case :Tuple=True ,__snake_case :str=True ,__snake_case :Union[str, Any]=True ,__snake_case :List[str]=True ,__snake_case :Optional[int]=99 ,__snake_case :List[str]=32 ,__snake_case :Union[str, Any]=5 ,__snake_case :Union[str, Any]=4 ,__snake_case :int=37 ,__snake_case :List[Any]="gelu" ,__snake_case :Union[str, Any]=0.1 ,__snake_case :Any=0.1 ,__snake_case :List[Any]=5_12 ,__snake_case :Optional[int]=16 ,__snake_case :int=2 ,__snake_case :str=0.02 ,__snake_case :Optional[Any]=4 ,) -> List[str]:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_attention_mask
a__ = use_token_type_ids
a__ = use_labels
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_choices
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_attention_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=__snake_case ,)
return config, input_ids, attention_mask
def lowerCamelCase__( self :Tuple ) -> Optional[Any]:
a__ = self.prepare_config_and_inputs()
a__ , a__ , a__ = config_and_inputs
a__ = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class snake_case_ (lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Optional[int] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase__( self :int ) -> Optional[Any]:
a__ = FlaxDistilBertModelTester(self )
@slow
def lowerCamelCase__( self :int ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
a__ = model_class_name.from_pretrained('distilbert-base-uncased' )
a__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(__snake_case )
@require_flax
class snake_case_ (unittest.TestCase ):
@slow
def lowerCamelCase__( self :Tuple ) -> Optional[Any]:
a__ = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
a__ = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
a__ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
a__ = model(__snake_case ,attention_mask=__snake_case )[0]
a__ = (1, 11, 7_68)
self.assertEqual(output.shape ,__snake_case )
a__ = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,__snake_case ,atol=1E-4 ) )
| 657 |
from __future__ import annotations
def __lowercase ( __lowerCAmelCase : list[int] ): # This function is recursive
a__ = len(__lowerCAmelCase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
a__ = array[0]
a__ = False
a__ = 1
a__ = []
while not is_found and i < array_length:
if array[i] < pivot:
a__ = True
a__ = [element for element in array[i:] if element >= array[i]]
a__ = longest_subsequence(__lowerCAmelCase )
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
a__ = temp_array
else:
i += 1
a__ = [element for element in array[1:] if element >= pivot]
a__ = [pivot, *longest_subsequence(__lowerCAmelCase )]
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
snake_case : int = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
def __init__( self :Optional[int] ,__snake_case :Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Dict:
super().__init__()
a__ = nn.ModuleList(__snake_case )
def lowerCamelCase__( self :int ,__snake_case :torch.FloatTensor ,__snake_case :Union[torch.Tensor, float, int] ,__snake_case :torch.Tensor ,__snake_case :List[torch.tensor] ,__snake_case :List[float] ,__snake_case :Optional[torch.Tensor] = None ,__snake_case :Optional[torch.Tensor] = None ,__snake_case :Optional[torch.Tensor] = None ,__snake_case :Optional[Dict[str, Any]] = None ,__snake_case :bool = False ,__snake_case :bool = True ,) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(__snake_case ,__snake_case ,self.nets ) ):
a__ , a__ = controlnet(
__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,)
# merge samples
if i == 0:
a__ , a__ = down_samples, mid_sample
else:
a__ = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(__snake_case ,__snake_case )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def lowerCamelCase__( self :Tuple ,__snake_case :Union[str, os.PathLike] ,__snake_case :bool = True ,__snake_case :Callable = None ,__snake_case :bool = False ,__snake_case :Optional[str] = None ,) -> List[str]:
a__ = 0
a__ = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
__snake_case ,is_main_process=__snake_case ,save_function=__snake_case ,safe_serialization=__snake_case ,variant=__snake_case ,)
idx += 1
a__ = model_path_to_save + F'_{idx}'
@classmethod
def lowerCamelCase__( cls :Union[str, Any] ,__snake_case :Optional[Union[str, os.PathLike]] ,**__snake_case :Tuple ) -> Optional[int]:
a__ = 0
a__ = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
a__ = pretrained_model_path
while os.path.isdir(__snake_case ):
a__ = ControlNetModel.from_pretrained(__snake_case ,**__snake_case )
controlnets.append(__snake_case )
idx += 1
a__ = pretrained_model_path + F'_{idx}'
logger.info(F'{len(__snake_case )} controlnets loaded from {pretrained_model_path}.' )
if len(__snake_case ) == 0:
raise ValueError(
F'No ControlNets found under {os.path.dirname(__snake_case )}. Expected at least {pretrained_model_path + "_0"}.' )
return cls(__snake_case )
| 657 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Dict = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = ['''pixel_values''']
def __init__( self :Optional[Any] ,__snake_case :bool = True ,__snake_case :int = 32 ,__snake_case :Union[str, Any]=PILImageResampling.BILINEAR ,__snake_case :bool = True ,**__snake_case :Tuple ,) -> None:
a__ = do_resize
a__ = do_rescale
a__ = size_divisor
a__ = resample
super().__init__(**__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :np.ndarray ,__snake_case :int ,__snake_case :Tuple ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :List[Any] ) -> np.ndarray:
a__ , a__ = get_image_size(__snake_case )
# Rounds the height and width down to the closest multiple of size_divisor
a__ = height // size_divisor * size_divisor
a__ = width // size_divisor * size_divisor
a__ = resize(__snake_case ,(new_h, new_w) ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
return image
def lowerCamelCase__( self :List[str] ,__snake_case :np.ndarray ,__snake_case :float ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :str ) -> np.ndarray:
return rescale(image=__snake_case ,scale=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :Tuple ,__snake_case :Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] ,__snake_case :Optional[bool] = None ,__snake_case :Optional[int] = None ,__snake_case :Union[str, Any]=None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[Union[TensorType, str]] = None ,__snake_case :ChannelDimension = ChannelDimension.FIRST ,**__snake_case :List[Any] ,) -> BatchFeature:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = size_divisor if size_divisor is not None else self.size_divisor
a__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
a__ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(__snake_case ) for img in images]
if do_resize:
a__ = [self.resize(__snake_case ,size_divisor=__snake_case ,resample=__snake_case ) for image in images]
if do_rescale:
a__ = [self.rescale(__snake_case ,scale=1 / 2_55 ) for image in images]
a__ = [to_channel_dimension_format(__snake_case ,__snake_case ) for image in images]
a__ = {'pixel_values': images}
return BatchFeature(data=__snake_case ,tensor_type=__snake_case )
| 657 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class snake_case_ (unittest.TestCase ):
def __init__( self :str ,__snake_case :Any ,__snake_case :List[Any]=7 ,__snake_case :Dict=3 ,__snake_case :Dict=10 ,__snake_case :Dict=18 ,__snake_case :Any=30 ,__snake_case :int=4_00 ,__snake_case :Union[str, Any]=True ,__snake_case :Union[str, Any]=None ,__snake_case :Any=True ,__snake_case :int=[0.5, 0.5, 0.5] ,__snake_case :List[Any]=[0.5, 0.5, 0.5] ,__snake_case :Any=None ,) -> Union[str, Any]:
a__ = size if size is not None else {'shortest_edge': 18}
a__ = crop_size if crop_size is not None else {'height': 18, 'width': 18}
a__ = parent
a__ = batch_size
a__ = num_channels
a__ = num_frames
a__ = image_size
a__ = min_resolution
a__ = max_resolution
a__ = do_resize
a__ = size
a__ = do_normalize
a__ = image_mean
a__ = image_std
a__ = crop_size
def lowerCamelCase__( self :List[str] ) -> Dict:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class snake_case_ (lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : List[str] = VivitImageProcessor if is_vision_available() else None
def lowerCamelCase__( self :Optional[Any] ) -> int:
a__ = VivitImageProcessingTester(self )
@property
def lowerCamelCase__( self :List[str] ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__( self :str ) -> Dict:
a__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case ,'image_mean' ) )
self.assertTrue(hasattr(__snake_case ,'image_std' ) )
self.assertTrue(hasattr(__snake_case ,'do_normalize' ) )
self.assertTrue(hasattr(__snake_case ,'do_resize' ) )
self.assertTrue(hasattr(__snake_case ,'do_center_crop' ) )
self.assertTrue(hasattr(__snake_case ,'size' ) )
def lowerCamelCase__( self :Dict ) -> str:
a__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size ,{'height': 18, 'width': 18} )
a__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} )
def lowerCamelCase__( self :Any ) -> int:
# Initialize image_processing
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
a__ = prepare_video_inputs(self.image_processor_tester ,equal_resolution=__snake_case )
for video in video_inputs:
self.assertIsInstance(__snake_case ,__snake_case )
self.assertIsInstance(video[0] ,Image.Image )
# Test not batched input
a__ = image_processing(video_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape ,(
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
a__ = image_processing(__snake_case ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def lowerCamelCase__( self :str ) -> Any:
# Initialize image_processing
a__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ = prepare_video_inputs(self.image_processor_tester ,equal_resolution=__snake_case ,numpify=__snake_case )
for video in video_inputs:
self.assertIsInstance(__snake_case ,__snake_case )
self.assertIsInstance(video[0] ,np.ndarray )
# Test not batched input
a__ = image_processing(video_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape ,(
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
a__ = image_processing(__snake_case ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def lowerCamelCase__( self :Tuple ) -> Any:
# Initialize image_processing
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ = prepare_video_inputs(self.image_processor_tester ,equal_resolution=__snake_case ,torchify=__snake_case )
for video in video_inputs:
self.assertIsInstance(__snake_case ,__snake_case )
self.assertIsInstance(video[0] ,torch.Tensor )
# Test not batched input
a__ = image_processing(video_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape ,(
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
a__ = image_processing(__snake_case ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_videos.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
| 657 |
def __lowercase ( __lowerCAmelCase : int ):
a__ = generate_pascal_triangle(__lowerCAmelCase )
for row_idx in range(__lowerCAmelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = []
for current_row_idx in range(__lowerCAmelCase ):
a__ = populate_current_row(__lowerCAmelCase , __lowerCAmelCase )
triangle.append(__lowerCAmelCase )
return triangle
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : int ):
a__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
a__ , a__ = 1, 1
for current_col_idx in range(1 , __lowerCAmelCase ):
calculate_current_element(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return current_row
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
a__ = triangle[current_row_idx - 1][current_col_idx - 1]
a__ = triangle[current_row_idx - 1][current_col_idx]
a__ = above_to_left_elt + above_to_right_elt
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = [[1]]
for row_index in range(1 , __lowerCAmelCase ):
a__ = [0] + result[-1] + [0]
a__ = row_index + 1
# Calculate the number of distinct elements in a row
a__ = sum(divmod(__lowerCAmelCase , 2 ) )
a__ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
a__ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
a__ = row_first_half + row_second_half
result.append(__lowerCAmelCase )
return result
def __lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__lowerCAmelCase : Callable , __lowerCAmelCase : int ) -> None:
a__ = F'{func.__name__}({value})'
a__ = timeit(F'__main__.{call}' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'{call:38} -- {timing:.4f} seconds' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__lowerCAmelCase , __lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 657 | 1 |
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ):
a__ = 0
a__ = len(__lowerCAmelCase ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
a__ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(__lowerCAmelCase ):
return None
a__ = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
a__ = left
a__ = point
elif point > right:
a__ = right
a__ = point
else:
if item < current_item:
a__ = point - 1
else:
a__ = point + 1
return None
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] ):
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
a__ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(__lowerCAmelCase ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
elif point > right:
return interpolation_search_by_recursion(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , point - 1 )
else:
return interpolation_search_by_recursion(
__lowerCAmelCase , __lowerCAmelCase , point + 1 , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : int ):
if collection != sorted(__lowerCAmelCase ):
raise ValueError('Collection must be ascending sorted' )
return True
if __name__ == "__main__":
import sys
snake_case : List[str] = 0
if debug == 1:
snake_case : Union[str, Any] = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
snake_case : Optional[Any] = 67
snake_case : Union[str, Any] = interpolation_search(collection, target)
if result is not None:
print(f"""{target} found at positions: {result}""")
else:
print('''Not found''')
| 657 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
snake_case : str = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
snake_case : Tuple = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
snake_case : str = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
snake_case : Tuple = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
snake_case : int = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def __lowercase ( ):
a__ , a__ = randrange(len(__lowerCAmelCase ) ), randrange(len(__lowerCAmelCase ) )
a__ = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
a__ , a__ = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowercase ( __lowerCAmelCase : int = 1_0_0 ):
return (generate_random_hand() for _ in range(__lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
assert PokerHand(__lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
a__ = PokerHand(__lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
def __lowercase ( ):
a__ = [PokerHand(__lowerCAmelCase ) for hand in SORTED_HANDS]
a__ = poker_hands.copy()
shuffle(__lowerCAmelCase )
a__ = chain(sorted(__lowerCAmelCase ) )
for index, hand in enumerate(__lowerCAmelCase ):
assert hand == poker_hands[index]
def __lowercase ( ):
# Test that five high straights are compared correctly.
a__ = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=__lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowercase ( ):
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
a__ = PokerHand('2C 4S AS 3D 5C' )
a__ = True
a__ = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowercase ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
a__ = 0
a__ = os.path.abspath(os.path.dirname(__lowerCAmelCase ) )
a__ = os.path.join(__lowerCAmelCase , 'poker_hands.txt' )
with open(__lowerCAmelCase ) as file_hand:
for line in file_hand:
a__ = line[:1_4].strip()
a__ = line[1_5:].strip()
a__ , a__ = PokerHand(__lowerCAmelCase ), PokerHand(__lowerCAmelCase )
a__ = player.compare_with(__lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 657 | 1 |
from ..utils import DummyObject, requires_backends
class snake_case_ (metaclass=lowerCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = ['''torch''', '''torchsde''']
def __init__( self :Optional[int] ,*__snake_case :Optional[int] ,**__snake_case :Optional[int] ) -> Dict:
requires_backends(self ,['torch', 'torchsde'] )
@classmethod
def lowerCamelCase__( cls :Dict ,*__snake_case :Union[str, Any] ,**__snake_case :Union[str, Any] ) -> Tuple:
requires_backends(cls ,['torch', 'torchsde'] )
@classmethod
def lowerCamelCase__( cls :Any ,*__snake_case :List[Any] ,**__snake_case :Any ) -> Dict:
requires_backends(cls ,['torch', 'torchsde'] )
| 657 |
def __lowercase ( __lowerCAmelCase : int ):
if length <= 0 or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(__lowerCAmelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 657 | 1 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
snake_case : List[Any] = logging.getLogger(__name__)
def __lowercase ( __lowerCAmelCase : str ):
a__ = git.Repo(search_parent_directories=__lowerCAmelCase )
a__ = {
'repo_id': str(__lowerCAmelCase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
}
with open(os.path.join(__lowerCAmelCase , 'git_log.json' ) , 'w' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase , indent=4 )
def __lowercase ( __lowerCAmelCase : Union[str, Any] ):
if params.n_gpu <= 0:
a__ = 0
a__ = -1
a__ = True
a__ = False
return
assert torch.cuda.is_available()
logger.info('Initializing GPUs' )
if params.n_gpu > 1:
assert params.local_rank != -1
a__ = int(os.environ['WORLD_SIZE'] )
a__ = int(os.environ['N_GPU_NODE'] )
a__ = int(os.environ['RANK'] )
# number of nodes / node ID
a__ = params.world_size // params.n_gpu_per_node
a__ = params.global_rank // params.n_gpu_per_node
a__ = True
assert params.n_nodes == int(os.environ['N_NODES'] )
assert params.node_id == int(os.environ['NODE_RANK'] )
# local job (single GPU)
else:
assert params.local_rank == -1
a__ = 1
a__ = 0
a__ = 0
a__ = 0
a__ = 1
a__ = 1
a__ = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
a__ = params.node_id == 0 and params.local_rank == 0
a__ = params.n_nodes > 1
# summary
a__ = F'--- Global rank: {params.global_rank} - '
logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes )
logger.info(PREFIX + 'Node ID : %i' % params.node_id )
logger.info(PREFIX + 'Local rank : %i' % params.local_rank )
logger.info(PREFIX + 'World size : %i' % params.world_size )
logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node )
logger.info(PREFIX + 'Master : %s' % str(params.is_master ) )
logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) )
logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) )
logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('Initializing PyTorch distributed' )
torch.distributed.init_process_group(
init_method='env://' , backend='nccl' , )
def __lowercase ( __lowerCAmelCase : Dict ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 657 |
def __lowercase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ):
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError('The length of profit and weight must be same.' )
if max_weight <= 0:
raise ValueError('max_weight must greater than zero.' )
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
a__ = [p / w for p, w in zip(__lowerCAmelCase , __lowerCAmelCase )]
# Creating a copy of the list and sorting profit/weight in ascending order
a__ = sorted(__lowerCAmelCase )
# declaring useful variables
a__ = len(__lowerCAmelCase )
a__ = 0
a__ = 0
a__ = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
a__ = sorted_profit_by_weight[length - i - 1]
a__ = profit_by_weight.index(__lowerCAmelCase )
a__ = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
snake_case : Tuple = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
snake_case : Optional[int] = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
snake_case : List[str] = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 657 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Tuple = '''facebook/bart-large-mnli'''
UpperCAmelCase__ : Optional[Any] = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
UpperCAmelCase__ : List[str] = '''text_classifier'''
UpperCAmelCase__ : List[str] = AutoTokenizer
UpperCAmelCase__ : Union[str, Any] = AutoModelForSequenceClassification
UpperCAmelCase__ : int = ['''text''', ['''text''']]
UpperCAmelCase__ : Dict = ['''text''']
def lowerCamelCase__( self :List[str] ) -> int:
super().setup()
a__ = self.model.config
a__ = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
a__ = int(__snake_case )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :List[str] ,__snake_case :Dict ) -> int:
a__ = labels
return self.pre_processor(
[text] * len(__snake_case ) ,[F'This example is {label}' for label in labels] ,return_tensors='pt' ,padding='max_length' ,)
def lowerCamelCase__( self :Dict ,__snake_case :List[Any] ) -> Union[str, Any]:
a__ = outputs.logits
a__ = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 657 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Optional[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class snake_case_ (lowerCamelCase_ ):
@require_torch
def lowerCamelCase__( self :Union[str, Any] ) -> List[Any]:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a__ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
a__ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
a__ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
a__ = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(__snake_case )
BertModel.from_pretrained(__snake_case )
BertTokenizer.from_pretrained(__snake_case )
pipeline(task='fill-mask' ,model=__snake_case )
# baseline - just load from_pretrained with normal network
a__ = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
a__ = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__ = '1'
a__ = subprocess.run(__snake_case ,env=__snake_case ,check=__snake_case ,capture_output=__snake_case )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def lowerCamelCase__( self :Dict ) -> List[Any]:
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a__ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
a__ = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
a__ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
a__ = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(__snake_case )
BertModel.from_pretrained(__snake_case )
BertTokenizer.from_pretrained(__snake_case )
pipeline(task='fill-mask' ,model=__snake_case )
# baseline - just load from_pretrained with normal network
a__ = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
a__ = self.get_env()
a__ = subprocess.run(__snake_case ,env=__snake_case ,check=__snake_case ,capture_output=__snake_case )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def lowerCamelCase__( self :Dict ) -> Any:
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
a__ = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
a__ = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
a__ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
a__ = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
a__ = self.get_env()
a__ = subprocess.run(__snake_case ,env=__snake_case ,check=__snake_case ,capture_output=__snake_case )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# next emulate no network
a__ = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__ = '1'
a__ = subprocess.run(__snake_case ,env=__snake_case ,check=__snake_case ,capture_output=__snake_case )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def lowerCamelCase__( self :str ) -> int:
a__ = '\nfrom transformers import pipeline\n '
a__ = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
a__ = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
a__ = self.get_env()
a__ = '1'
a__ = [sys.executable, '-c', '\n'.join([load, mock, run] )]
a__ = subprocess.run(__snake_case ,env=__snake_case ,check=__snake_case ,capture_output=__snake_case )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
@require_torch
def lowerCamelCase__( self :List[str] ) -> Any:
a__ = '\nfrom transformers import AutoModel\n '
a__ = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
a__ = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
a__ = self.get_env()
a__ = subprocess.run(__snake_case ,env=__snake_case ,check=__snake_case ,capture_output=__snake_case )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
a__ = '1'
a__ = subprocess.run(__snake_case ,env=__snake_case ,check=__snake_case ,capture_output=__snake_case )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
| 657 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_input_mask
a__ = use_labels
a__ = use_mc_token_ids
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
if self.use_mc_token_ids:
a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
a__ = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
a__ = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) = config_and_inputs
a__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
a__ = self.num_labels
a__ = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__( self :int ) -> List[str]:
a__ = CTRLModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,n_embd=37 )
def lowerCamelCase__( self :str ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
def lowerCamelCase__( self :int ) -> List[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__( self :Any ) -> Dict:
a__ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(__snake_case )
a__ = torch.tensor(
[[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=__snake_case ) # Legal the president is
a__ = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
a__ = model.generate(__snake_case ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() ,__snake_case )
| 657 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : Optional[int] = logging.get_logger(__name__)
snake_case : List[str] = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : List[Any] = '''convbert'''
def __init__( self :int ,__snake_case :str=3_05_22 ,__snake_case :Any=7_68 ,__snake_case :List[Any]=12 ,__snake_case :Tuple=12 ,__snake_case :Tuple=30_72 ,__snake_case :Tuple="gelu" ,__snake_case :Union[str, Any]=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Any=5_12 ,__snake_case :int=2 ,__snake_case :List[Any]=0.02 ,__snake_case :List[Any]=1E-12 ,__snake_case :Union[str, Any]=1 ,__snake_case :Optional[Any]=0 ,__snake_case :Tuple=2 ,__snake_case :Tuple=7_68 ,__snake_case :Dict=2 ,__snake_case :str=9 ,__snake_case :int=1 ,__snake_case :int=None ,**__snake_case :List[str] ,) -> Optional[Any]:
super().__init__(
pad_token_id=__snake_case ,bos_token_id=__snake_case ,eos_token_id=__snake_case ,**__snake_case ,)
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = initializer_range
a__ = layer_norm_eps
a__ = embedding_size
a__ = head_ratio
a__ = conv_kernel_size
a__ = num_groups
a__ = classifier_dropout
class snake_case_ (lowerCamelCase_ ):
@property
def lowerCamelCase__( self :Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a__ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a__ = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 657 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = 1
@register_to_config
def __init__( self :Optional[int] ,__snake_case :int = 10_00 ,__snake_case :Optional[Union[np.ndarray, List[float]]] = None ) -> int:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__snake_case )
# standard deviation of the initial noise distribution
a__ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
a__ = 4
# running values
a__ = []
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :int ,__snake_case :Union[str, torch.device] = None ) -> Union[str, Any]:
a__ = num_inference_steps
a__ = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
a__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
a__ = torch.tensor(self.config.trained_betas ,dtype=torch.floataa )
else:
a__ = torch.sin(steps * math.pi / 2 ) ** 2
a__ = (1.0 - self.betas**2) ** 0.5
a__ = (torch.atana(self.betas ,self.alphas ) / math.pi * 2)[:-1]
a__ = timesteps.to(__snake_case )
a__ = []
def lowerCamelCase__( self :Any ,__snake_case :torch.FloatTensor ,__snake_case :int ,__snake_case :torch.FloatTensor ,__snake_case :bool = True ,) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
a__ = (self.timesteps == timestep).nonzero().item()
a__ = timestep_index + 1
a__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__snake_case )
if len(self.ets ) == 1:
a__ = self.ets[-1]
elif len(self.ets ) == 2:
a__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
a__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
a__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
a__ = self._get_prev_sample(__snake_case ,__snake_case ,__snake_case ,__snake_case )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :torch.FloatTensor ,*__snake_case :int ,**__snake_case :Optional[int] ) -> torch.FloatTensor:
return sample
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[Any] ,__snake_case :Optional[int] ,__snake_case :Dict ,__snake_case :Any ) -> Optional[Any]:
a__ = self.alphas[timestep_index]
a__ = self.betas[timestep_index]
a__ = self.alphas[prev_timestep_index]
a__ = self.betas[prev_timestep_index]
a__ = (sample - sigma * ets) / max(__snake_case ,1E-8 )
a__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self :Any ) -> Union[str, Any]:
return self.config.num_train_timesteps
| 657 | 1 |
import cva
import numpy as np
class snake_case_ :
def __init__( self :Tuple ,__snake_case :float ,__snake_case :int ) -> List[str]:
if k in (0.04, 0.06):
a__ = k
a__ = window_size
else:
raise ValueError('invalid k value' )
def __str__( self :Optional[int] ) -> str:
return str(self.k )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :str ) -> tuple[cva.Mat, list[list[int]]]:
a__ = cva.imread(__snake_case ,0 )
a__ , a__ = img.shape
a__ = []
a__ = img.copy()
a__ = cva.cvtColor(__snake_case ,cva.COLOR_GRAY2RGB )
a__ , a__ = np.gradient(__snake_case )
a__ = dx**2
a__ = dy**2
a__ = dx * dy
a__ = 0.04
a__ = self.window_size // 2
for y in range(__snake_case ,h - offset ):
for x in range(__snake_case ,w - offset ):
a__ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
a__ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
a__ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
a__ = (wxx * wyy) - (wxy**2)
a__ = wxx + wyy
a__ = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) ,0 )
color_img.itemset((y, x, 1) ,0 )
color_img.itemset((y, x, 2) ,2_55 )
return color_img, corner_list
if __name__ == "__main__":
snake_case : int = HarrisCorner(0.04, 3)
snake_case , snake_case : Union[str, Any] = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
| 657 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case : Any = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = ['''MobileViTFeatureExtractor''']
snake_case : int = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Tuple = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
snake_case : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
def __lowercase ( __lowerCAmelCase : list ):
def merge(__lowerCAmelCase : list , __lowerCAmelCase : list ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(__lowerCAmelCase ) <= 1:
return collection
a__ = len(__lowerCAmelCase ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case : List[Any] = input('''Enter numbers separated by a comma:\n''').strip()
snake_case : Any = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 657 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
snake_case : Dict = logging.get_logger(__name__)
snake_case : Any = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
if tokenizer_name is None:
a__ = TOKENIZER_CLASSES
else:
a__ = {tokenizer_name: getattr(__lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
for tokenizer_name in tokenizer_names:
a__ = TOKENIZER_CLASSES[tokenizer_name]
a__ = True
if checkpoint_name is None:
a__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
a__ = [checkpoint_name]
logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
for checkpoint in checkpoint_names:
logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
# Load tokenizer
a__ = tokenizer_class.from_pretrained(__lowerCAmelCase , force_download=__lowerCAmelCase )
# Save fast tokenizer
logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
# For organization names we create sub-directories
if "/" in checkpoint:
a__ , a__ = checkpoint.split('/' )
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
elif add_prefix:
a__ = checkpoint
a__ = dump_path
else:
a__ = None
a__ = dump_path
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
a__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
a__ = file_path.split(__lowerCAmelCase )[-1][0]
if next_char == "/":
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
a__ = None
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
a__ = tokenizer.save_pretrained(
__lowerCAmelCase , legacy_format=__lowerCAmelCase , filename_prefix=__lowerCAmelCase )
logger.info(F'=> File names {file_names}' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(__lowerCAmelCase )
logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
snake_case : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 657 | 1 |
import random
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any ):
a__ = a[left_index]
a__ = left_index + 1
for j in range(left_index + 1 , __lowerCAmelCase ):
if a[j] < pivot:
a__ , a__ = a[i], a[j]
i += 1
a__ , a__ = a[i - 1], a[left_index]
return i - 1
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ):
if left < right:
a__ = random.randint(__lowerCAmelCase , right - 1 )
a__ , a__ = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
a__ = partition(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
quick_sort_random(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # recursive quicksort to the left of the pivot point
quick_sort_random(
__lowerCAmelCase , pivot_index + 1 , __lowerCAmelCase ) # recursive quicksort to the right of the pivot point
def __lowercase ( ):
a__ = input('Enter numbers separated by a comma:\n' ).strip()
a__ = [int(__lowerCAmelCase ) for item in user_input.split(',' )]
quick_sort_random(__lowerCAmelCase , 0 , len(__lowerCAmelCase ) )
print(__lowerCAmelCase )
if __name__ == "__main__":
main()
| 657 |
from math import ceil, sqrt
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_0_0_0 ):
a__ = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
a__ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
a__ = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 1 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def __lowercase ( ):
a__ = argparse.ArgumentParser()
parser.add_argument('--model_ckpt' , type=__lowerCAmelCase , default='microsoft/unixcoder-base-nine' )
parser.add_argument('--num_epochs' , type=__lowerCAmelCase , default=5 )
parser.add_argument('--batch_size' , type=__lowerCAmelCase , default=6 )
parser.add_argument('--gradient_accumulation_steps' , type=__lowerCAmelCase , default=1 )
parser.add_argument('--freeze' , type=__lowerCAmelCase , default=__lowerCAmelCase )
parser.add_argument('--learning_rate' , type=__lowerCAmelCase , default=5E-4 )
parser.add_argument('--seed' , type=__lowerCAmelCase , default=0 )
parser.add_argument('--lr_scheduler_type' , type=__lowerCAmelCase , default='cosine' )
parser.add_argument('--num_warmup_steps' , type=__lowerCAmelCase , default=1_0 )
parser.add_argument('--weight_decay' , type=__lowerCAmelCase , default=0.01 )
parser.add_argument('--output_dir' , type=__lowerCAmelCase , default='./results' )
return parser.parse_args()
snake_case : Optional[int] = load('''accuracy''')
def __lowercase ( __lowerCAmelCase : Dict ):
a__ , a__ = eval_pred
a__ = np.argmax(__lowerCAmelCase , axis=1 )
return metric.compute(predictions=__lowerCAmelCase , references=__lowerCAmelCase )
class snake_case_ (lowerCamelCase_ ):
def __init__( self :Optional[int] ,__snake_case :Optional[Any] ) -> None:
super().__init__()
a__ = trainer
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :List[Any] ,__snake_case :Optional[int] ,**__snake_case :List[Any] ) -> Any:
if control.should_evaluate:
a__ = deepcopy(__snake_case )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset ,metric_key_prefix='train' )
return control_copy
def __lowercase ( ):
a__ = get_args()
set_seed(args.seed )
a__ = load_dataset('codeparrot/codecomplex' , split='train' )
a__ = dataset.train_test_split(test_size=0.2 )
a__ = train_test['test'].train_test_split(test_size=0.5 )
a__ = DatasetDict(
{
'train': train_test['train'],
'test': test_validation['train'],
'valid': test_validation['test'],
} )
print('Loading tokenizer and model' )
a__ = AutoTokenizer.from_pretrained(args.model_ckpt )
a__ = tokenizer.eos_token
a__ = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
a__ = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
a__ = False
a__ = ClassLabel(num_classes=7 , names=list(set(train_test_validation['train']['complexity'] ) ) )
def tokenize(__lowerCAmelCase : List[Any] ):
a__ = tokenizer(example['src'] , truncation=__lowerCAmelCase , max_length=1_0_2_4 )
a__ = labels.straint(example['complexity'] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
a__ = train_test_validation.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=train_test_validation['train'].column_names , )
a__ = DataCollatorWithPadding(tokenizer=__lowerCAmelCase )
a__ = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='epoch' , save_strategy='epoch' , logging_strategy='epoch' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='accuracy' , run_name='complexity-java' , report_to='wandb' , )
a__ = Trainer(
model=__lowerCAmelCase , args=__lowerCAmelCase , train_dataset=tokenized_datasets['train'] , eval_dataset=tokenized_datasets['valid'] , tokenizer=__lowerCAmelCase , data_collator=__lowerCAmelCase , compute_metrics=__lowerCAmelCase , )
print('Training...' )
trainer.add_callback(CustomCallback(__lowerCAmelCase ) )
trainer.train()
if __name__ == "__main__":
main()
| 657 |
from sklearn.metrics import fa_score
import datasets
snake_case : Optional[int] = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
snake_case : List[Any] = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
snake_case : Union[str, Any] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
def lowerCamelCase__( self :Any ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,)
def lowerCamelCase__( self :Dict ,__snake_case :str ,__snake_case :str ,__snake_case :Dict=None ,__snake_case :str=1 ,__snake_case :Optional[int]="binary" ,__snake_case :Union[str, Any]=None ) -> Tuple:
a__ = fa_score(
__snake_case ,__snake_case ,labels=__snake_case ,pos_label=__snake_case ,average=__snake_case ,sample_weight=__snake_case )
return {"f1": float(__snake_case ) if score.size == 1 else score}
| 657 | 1 |
import os
def __lowercase ( __lowerCAmelCase : str = "input.txt" ):
with open(os.path.join(os.path.dirname(__lowerCAmelCase ) , __lowerCAmelCase ) ) as input_file:
a__ = [
[int(__lowerCAmelCase ) for element in line.split(',' )]
for line in input_file.readlines()
]
a__ = len(__lowerCAmelCase )
a__ = len(matrix[0] )
a__ = [[-1 for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
a__ = matrix[i][0]
for j in range(1 , __lowerCAmelCase ):
for i in range(__lowerCAmelCase ):
a__ = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __lowerCAmelCase ):
a__ = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
a__ = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
snake_case : Any = logging.get_logger(__name__)
snake_case : Tuple = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class snake_case_ (lowerCamelCase_ ):
def __init__( self :str ,__snake_case :Dict=None ,__snake_case :int=None ,*__snake_case :str ,**__snake_case :Union[str, Any] ) -> Tuple:
super().__init__(*__snake_case ,**__snake_case )
if config is None:
assert isinstance(self.model ,__snake_case ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F' {self.model.__class__}'
)
a__ = self.model.config
else:
a__ = config
a__ = data_args
a__ = self.config.tgt_vocab_size if isinstance(self.config ,__snake_case ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
' padding..' )
if self.args.label_smoothing == 0:
a__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
a__ = label_smoothed_nll_loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :int ) -> Tuple:
if self.optimizer is None:
a__ = ['bias', 'LayerNorm.weight']
a__ = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
a__ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
a__ = Adafactor
a__ = {'scale_parameter': False, 'relative_step': False}
else:
a__ = AdamW
a__ = {
'betas': (self.args.adam_betaa, self.args.adam_betaa),
'eps': self.args.adam_epsilon,
}
a__ = self.args.learning_rate
if self.sharded_ddp:
a__ = OSS(
params=__snake_case ,optim=__snake_case ,**__snake_case ,)
else:
a__ = optimizer_cls(__snake_case ,**__snake_case )
if self.lr_scheduler is None:
a__ = self._get_lr_scheduler(__snake_case )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def lowerCamelCase__( self :Dict ,__snake_case :List[str] ) -> Union[str, Any]:
a__ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
a__ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
a__ = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
a__ = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=__snake_case )
return scheduler
def lowerCamelCase__( self :Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCamelCase__( self :str ,__snake_case :Optional[int] ,__snake_case :List[Any] ,__snake_case :Any ) -> Optional[Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute usual loss via models
a__ , a__ = model(**__snake_case ,labels=__snake_case ,use_cache=__snake_case )[:2]
else:
# compute label smoothed loss
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = torch.nn.functional.log_softmax(__snake_case ,dim=-1 )
a__ , a__ = self.loss_fn(__snake_case ,__snake_case ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :Optional[int] ) -> Any:
a__ = inputs.pop('labels' )
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
return loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :nn.Module ,__snake_case :Dict[str, Union[torch.Tensor, Any]] ,__snake_case :bool ,__snake_case :Optional[List[str]] = None ,) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
a__ = self._prepare_inputs(__snake_case )
a__ = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
a__ = self.model.generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**__snake_case ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
a__ = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
a__ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
a__ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
return (loss, logits, labels)
def lowerCamelCase__( self :List[str] ,__snake_case :Optional[Any] ,__snake_case :Union[str, Any] ) -> int:
# If PAD token is not defined at least EOS token has to be defined
a__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F' padded to `max_length`={max_length}' )
a__ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
a__ = tensor
return padded_tensor
| 657 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowercase ( __lowerCAmelCase : Union[str, Any]=None ):
if subparsers is not None:
a__ = subparsers.add_parser('test' )
else:
a__ = argparse.ArgumentParser('Accelerate test command' )
parser.add_argument(
'--config_file' , default=__lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=__lowerCAmelCase )
return parser
def __lowercase ( __lowerCAmelCase : Optional[int] ):
a__ = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
if args.config_file is None:
a__ = script_name
else:
a__ = F'--config_file={args.config_file} {script_name}'
a__ = ['accelerate-launch'] + test_args.split()
a__ = execute_subprocess_async(__lowerCAmelCase , env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def __lowercase ( ):
a__ = test_command_parser()
a__ = parser.parse_args()
test_command(__lowerCAmelCase )
if __name__ == "__main__":
main()
| 657 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
snake_case : Dict = '''
Human: <<task>>
Assistant: '''
snake_case : Optional[int] = '''huggingface-tools/default-prompts'''
snake_case : Tuple = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any="run" ):
if prompt_or_repo_id is None:
a__ = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , __lowerCAmelCase ) is not None:
return prompt_or_repo_id
a__ = cached_file(
__lowerCAmelCase , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(__lowerCAmelCase , 'r' , encoding='utf-8' ) as f:
return f.read()
| 657 | 1 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int ):
a__ = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
a__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ).convert('RGB' )
a__ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711) ),
] )
a__ = transform(__lowerCAmelCase ).unsqueeze(0 ).to(__lowerCAmelCase )
return image
def __lowercase ( __lowerCAmelCase : Optional[int] ):
if "visual_encoder" in key:
a__ = re.sub('visual_encoder*' , 'vision_model.encoder' , __lowerCAmelCase )
if "blocks" in key:
a__ = re.sub(R'blocks' , 'layers' , __lowerCAmelCase )
if "attn" in key:
a__ = re.sub(R'attn' , 'self_attn' , __lowerCAmelCase )
if "norm1" in key:
a__ = re.sub(R'norm1' , 'layer_norm1' , __lowerCAmelCase )
if "norm2" in key:
a__ = re.sub(R'norm2' , 'layer_norm2' , __lowerCAmelCase )
if "encoder.norm" in key:
a__ = re.sub(R'encoder.norm' , 'post_layernorm' , __lowerCAmelCase )
if "encoder.patch_embed.proj" in key:
a__ = re.sub(R'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , __lowerCAmelCase )
if "encoder.pos_embed" in key:
a__ = re.sub(R'encoder.pos_embed' , 'embeddings.position_embedding' , __lowerCAmelCase )
if "encoder.cls_token" in key:
a__ = re.sub(R'encoder.cls_token' , 'embeddings.class_embedding' , __lowerCAmelCase )
if "self_attn" in key:
a__ = re.sub(R'self_attn.proj' , 'self_attn.projection' , __lowerCAmelCase )
return key
@torch.no_grad()
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str]=None ):
if config_path is not None:
a__ = BlipConfig.from_pretrained(__lowerCAmelCase )
else:
a__ = BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
a__ = BlipForConditionalGeneration(__lowerCAmelCase ).eval()
a__ = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
a__ = blip_decoder(pretrained=__lowerCAmelCase , image_size=3_8_4 , vit='base' )
a__ = pt_model.eval()
a__ = pt_model.state_dict()
for key in modified_state_dict.copy():
a__ = modified_state_dict.pop(__lowerCAmelCase )
a__ = rename_key(__lowerCAmelCase )
a__ = value
hf_model.load_state_dict(__lowerCAmelCase )
a__ = 3_8_4
a__ = load_demo_image(image_size=__lowerCAmelCase , device='cpu' )
a__ = BertTokenizer.from_pretrained('bert-base-uncased' )
a__ = tokenizer(['a picture of'] ).input_ids
a__ = hf_model.generate(__lowerCAmelCase , __lowerCAmelCase )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
a__ = hf_model.generate(__lowerCAmelCase )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(__lowerCAmelCase )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
a__ = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
a__ = blip_vqa(pretrained=__lowerCAmelCase , image_size=__lowerCAmelCase , vit='base' )
vqa_model.eval()
a__ = vqa_model.state_dict()
for key in modified_state_dict.copy():
a__ = modified_state_dict.pop(__lowerCAmelCase )
a__ = rename_key(__lowerCAmelCase )
a__ = value
a__ = BlipForQuestionAnswering(__lowerCAmelCase )
hf_vqa_model.load_state_dict(__lowerCAmelCase )
a__ = ['How many dogs are in this image?']
a__ = tokenizer(__lowerCAmelCase , return_tensors='pt' ).input_ids
a__ = hf_vqa_model.generate(__lowerCAmelCase , __lowerCAmelCase )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
a__ = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
a__ = blip_itm(pretrained=__lowerCAmelCase , image_size=__lowerCAmelCase , vit='base' )
itm_model.eval()
a__ = itm_model.state_dict()
for key in modified_state_dict.copy():
a__ = modified_state_dict.pop(__lowerCAmelCase )
a__ = rename_key(__lowerCAmelCase )
a__ = value
a__ = BlipForImageTextRetrieval(__lowerCAmelCase )
a__ = ['A picture of a woman with a dog sitting in a beach']
a__ = tokenizer(
__lowerCAmelCase , return_tensors='pt' , padding='max_length' , truncation=__lowerCAmelCase , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(__lowerCAmelCase )
hf_itm_model.eval()
a__ = hf_itm_model(__lowerCAmelCase , __lowerCAmelCase , use_itm_head=__lowerCAmelCase )
a__ = hf_itm_model(__lowerCAmelCase , __lowerCAmelCase , use_itm_head=__lowerCAmelCase )
assert out[0].item() == 0.2_110_687_494_277_954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45_698_845_386_505_127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
snake_case : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
snake_case : int = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 657 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
a__ = precision
a__ = ceil(precision / 1_4 )
a__ = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
a__ = 1
a__ = 1_3_5_9_1_4_0_9
a__ = Decimal(__lowerCAmelCase )
for k in range(1 , __lowerCAmelCase ):
a__ = factorial(6 * k ) // (factorial(3 * k ) * factorial(__lowerCAmelCase ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
snake_case : Tuple = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 657 | 1 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
snake_case : Union[str, Any] = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
def __init__( self :Union[str, Any] ,*__snake_case :Dict ,**__snake_case :Union[str, Any] ) -> None:
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' ,__snake_case ,)
super().__init__(*__snake_case ,**__snake_case )
| 657 |
def __lowercase ( __lowerCAmelCase : int = 2_0_0 ):
a__ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
a__ = [0] * (pence + 1)
a__ = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(__lowerCAmelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
| 657 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class snake_case_ (unittest.TestCase ):
def __init__( self :Optional[int] ,__snake_case :Optional[Any] ,__snake_case :str=7 ,__snake_case :int=3 ,__snake_case :Dict=30 ,__snake_case :List[str]=4_00 ,__snake_case :Optional[Any]=True ,__snake_case :List[Any]=None ,__snake_case :List[str]=True ,__snake_case :List[Any]=[0.5, 0.5, 0.5] ,__snake_case :List[str]=[0.5, 0.5, 0.5] ,__snake_case :Optional[int]=True ,__snake_case :str=1 / 2_55 ,__snake_case :int=True ,) -> Dict:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
a__ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
a__ = parent
a__ = batch_size
a__ = num_channels
a__ = min_resolution
a__ = max_resolution
a__ = do_resize
a__ = size
a__ = do_normalize
a__ = image_mean
a__ = image_std
a__ = do_rescale
a__ = rescale_factor
a__ = do_pad
def lowerCamelCase__( self :Tuple ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCamelCase__( self :str ,__snake_case :Dict ,__snake_case :List[str]=False ) -> List[Any]:
if not batched:
a__ = image_inputs[0]
if isinstance(__snake_case ,Image.Image ):
a__ , a__ = image.size
else:
a__ , a__ = image.shape[1], image.shape[2]
if w < h:
a__ = int(self.size['shortest_edge'] * h / w )
a__ = self.size['shortest_edge']
elif w > h:
a__ = self.size['shortest_edge']
a__ = int(self.size['shortest_edge'] * w / h )
else:
a__ = self.size['shortest_edge']
a__ = self.size['shortest_edge']
else:
a__ = []
for image in image_inputs:
a__ , a__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
a__ = max(__snake_case ,key=lambda __snake_case : item[0] )[0]
a__ = max(__snake_case ,key=lambda __snake_case : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class snake_case_ (lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : List[Any] = ConditionalDetrImageProcessor if is_vision_available() else None
def lowerCamelCase__( self :Tuple ) -> Optional[Any]:
a__ = ConditionalDetrImageProcessingTester(self )
@property
def lowerCamelCase__( self :List[str] ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__( self :List[str] ) -> Optional[Any]:
a__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case ,'image_mean' ) )
self.assertTrue(hasattr(__snake_case ,'image_std' ) )
self.assertTrue(hasattr(__snake_case ,'do_normalize' ) )
self.assertTrue(hasattr(__snake_case ,'do_resize' ) )
self.assertTrue(hasattr(__snake_case ,'size' ) )
def lowerCamelCase__( self :int ) -> str:
a__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad ,__snake_case )
a__ = self.image_processing_class.from_dict(
self.image_processor_dict ,size=42 ,max_size=84 ,pad_and_return_pixel_mask=__snake_case )
self.assertEqual(image_processor.size ,{'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad ,__snake_case )
def lowerCamelCase__( self :Union[str, Any] ) -> Union[str, Any]:
pass
def lowerCamelCase__( self :Union[str, Any] ) -> List[str]:
# Initialize image_processing
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case ,Image.Image )
# Test not batched input
a__ = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
a__ , a__ = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
a__ , a__ = self.image_processor_tester.get_expected_values(__snake_case ,batched=__snake_case )
a__ = image_processing(__snake_case ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
# Initialize image_processing
a__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__snake_case ,numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case ,np.ndarray )
# Test not batched input
a__ = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
a__ , a__ = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
a__ = image_processing(__snake_case ,return_tensors='pt' ).pixel_values
a__ , a__ = self.image_processor_tester.get_expected_values(__snake_case ,batched=__snake_case )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def lowerCamelCase__( self :int ) -> Tuple:
# Initialize image_processing
a__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__snake_case ,torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case ,torch.Tensor )
# Test not batched input
a__ = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
a__ , a__ = self.image_processor_tester.get_expected_values(__snake_case )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
a__ = image_processing(__snake_case ,return_tensors='pt' ).pixel_values
a__ , a__ = self.image_processor_tester.get_expected_values(__snake_case ,batched=__snake_case )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def lowerCamelCase__( self :Dict ) -> Optional[Any]:
# prepare image and target
a__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
a__ = json.loads(f.read() )
a__ = {'image_id': 3_97_69, 'annotations': target}
# encode them
a__ = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
a__ = image_processing(images=__snake_case ,annotations=__snake_case ,return_tensors='pt' )
# verify pixel values
a__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape ,__snake_case )
a__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__snake_case ,atol=1E-4 ) )
# verify area
a__ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__snake_case ) )
# verify boxes
a__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__snake_case )
a__ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__snake_case ,atol=1E-3 ) )
# verify image_id
a__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__snake_case ) )
# verify is_crowd
a__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__snake_case ) )
# verify class_labels
a__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__snake_case ) )
# verify orig_size
a__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__snake_case ) )
# verify size
a__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__snake_case ) )
@slow
def lowerCamelCase__( self :Tuple ) -> List[Any]:
# prepare image, target and masks_path
a__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
a__ = json.loads(f.read() )
a__ = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
a__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
a__ = ConditionalDetrImageProcessor(format='coco_panoptic' )
a__ = image_processing(images=__snake_case ,annotations=__snake_case ,masks_path=__snake_case ,return_tensors='pt' )
# verify pixel values
a__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape ,__snake_case )
a__ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,__snake_case ,atol=1E-4 ) )
# verify area
a__ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,__snake_case ) )
# verify boxes
a__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,__snake_case )
a__ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,__snake_case ,atol=1E-3 ) )
# verify image_id
a__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,__snake_case ) )
# verify is_crowd
a__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,__snake_case ) )
# verify class_labels
a__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,__snake_case ) )
# verify masks
a__ = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,__snake_case )
# verify orig_size
a__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,__snake_case ) )
# verify size
a__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,__snake_case ) )
| 657 |
from manim import *
class snake_case_ (lowerCamelCase_ ):
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
a__ = Rectangle(height=0.5 ,width=0.5 )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
a__ = Rectangle(height=0.25 ,width=0.25 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('CPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('GPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Model' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
a__ = []
a__ = []
for i, rect in enumerate(__snake_case ):
a__ = fill.copy().set_fill(__snake_case ,opacity=0.8 )
target.move_to(__snake_case )
model_arr.append(__snake_case )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__snake_case ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case ,*__snake_case )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Disk' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
disk.move_to([-4, -1.25, 0] )
self.add(__snake_case ,__snake_case )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case ,__snake_case )
a__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(__snake_case ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__snake_case )
a__ = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) )
a__ = Square(0.3 )
input.set_fill(__snake_case ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__snake_case ,buff=0.5 )
self.play(Write(__snake_case ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__snake_case ,buff=0.02 )
self.play(MoveToTarget(__snake_case ) )
self.play(FadeOut(__snake_case ) )
a__ = Arrow(start=__snake_case ,end=__snake_case ,color=__snake_case ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__snake_case ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a__ = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) )
a__ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__snake_case ) ,Circumscribe(model_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_cpu_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
a__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__snake_case ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
a__ = AnimationGroup(
FadeOut(__snake_case ,run_time=0.5 ) ,MoveToTarget(__snake_case ,run_time=0.5 ) ,FadeIn(__snake_case ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__snake_case )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a__ = 0.7
self.play(
Circumscribe(model_arr[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_arr[i + 1] ,color=__snake_case ,**__snake_case ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(cpu_left_col_base[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
a__ = a_c
a__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__snake_case ) ,FadeOut(__snake_case ,run_time=0.5 ) ,)
a__ = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) ,MoveToTarget(__snake_case ) )
self.wait()
| 657 | 1 |
from __future__ import annotations
import math
class snake_case_ :
def __init__( self :Tuple ,__snake_case :int ) -> None:
a__ = size
# approximate the overall size of segment tree with given value
a__ = [0 for i in range(0 ,4 * size )]
# create array to store lazy update
a__ = [0 for i in range(0 ,4 * size )]
a__ = [0 for i in range(0 ,4 * size )] # flag for lazy update
def lowerCamelCase__( self :Tuple ,__snake_case :int ) -> int:
return idx * 2
def lowerCamelCase__( self :Any ,__snake_case :int ) -> int:
return idx * 2 + 1
def lowerCamelCase__( self :Any ,__snake_case :int ,__snake_case :int ,__snake_case :int ,__snake_case :list[int] ) -> None:
if left_element == right_element:
a__ = a[left_element - 1]
else:
a__ = (left_element + right_element) // 2
self.build(self.left(__snake_case ) ,__snake_case ,__snake_case ,__snake_case )
self.build(self.right(__snake_case ) ,mid + 1 ,__snake_case ,__snake_case )
a__ = max(
self.segment_tree[self.left(__snake_case )] ,self.segment_tree[self.right(__snake_case )] )
def lowerCamelCase__( self :Tuple ,__snake_case :int ,__snake_case :int ,__snake_case :int ,__snake_case :int ,__snake_case :int ,__snake_case :int ) -> bool:
if self.flag[idx] is True:
a__ = self.lazy[idx]
a__ = False
if left_element != right_element:
a__ = self.lazy[idx]
a__ = self.lazy[idx]
a__ = True
a__ = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
a__ = val
if left_element != right_element:
a__ = val
a__ = val
a__ = True
a__ = True
return True
a__ = (left_element + right_element) // 2
self.update(self.left(__snake_case ) ,__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
self.update(self.right(__snake_case ) ,mid + 1 ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
a__ = max(
self.segment_tree[self.left(__snake_case )] ,self.segment_tree[self.right(__snake_case )] )
return True
def lowerCamelCase__( self :Dict ,__snake_case :int ,__snake_case :int ,__snake_case :int ,__snake_case :int ,__snake_case :int ) -> int | float:
if self.flag[idx] is True:
a__ = self.lazy[idx]
a__ = False
if left_element != right_element:
a__ = self.lazy[idx]
a__ = self.lazy[idx]
a__ = True
a__ = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
a__ = (left_element + right_element) // 2
a__ = self.query(self.left(__snake_case ) ,__snake_case ,__snake_case ,__snake_case ,__snake_case )
a__ = self.query(self.right(__snake_case ) ,mid + 1 ,__snake_case ,__snake_case ,__snake_case )
return max(__snake_case ,__snake_case )
def __str__( self :Tuple ) -> str:
return str([self.query(1 ,1 ,self.size ,__snake_case ,__snake_case ) for i in range(1 ,self.size + 1 )] )
if __name__ == "__main__":
snake_case : List[str] = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
snake_case : Any = 15
snake_case : List[str] = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
print(segt)
| 657 |
from math import pi
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
| 657 | 1 |
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ):
print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
for i in range(__lowerCAmelCase ):
for j in range(__lowerCAmelCase ):
if dist[i][j] != float('inf' ):
print(int(dist[i][j] ) , end='\t' )
else:
print('INF' , end='\t' )
print()
def __lowercase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ):
a__ = [[float('inf' ) for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
for j in range(__lowerCAmelCase ):
a__ = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__lowerCAmelCase ):
# looping through rows of graph array
for i in range(__lowerCAmelCase ):
# looping through columns of graph array
for j in range(__lowerCAmelCase ):
if (
dist[i][k] != float('inf' )
and dist[k][j] != float('inf' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
a__ = dist[i][k] + dist[k][j]
_print_dist(__lowerCAmelCase , __lowerCAmelCase )
return dist, v
if __name__ == "__main__":
snake_case : int = int(input('''Enter number of vertices: '''))
snake_case : List[str] = int(input('''Enter number of edges: '''))
snake_case : Optional[int] = [[float('''inf''') for i in range(v)] for j in range(v)]
for i in range(v):
snake_case : Dict = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('''\nEdge ''', i + 1)
snake_case : Union[str, Any] = int(input('''Enter source:'''))
snake_case : Optional[int] = int(input('''Enter destination:'''))
snake_case : Optional[int] = float(input('''Enter weight:'''))
snake_case : Dict = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 657 |
from math import sqrt
def __lowercase ( __lowerCAmelCase : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_1 ):
a__ = 0
a__ = 1
while count != nth and number < 3:
number += 1
if is_prime(__lowerCAmelCase ):
count += 1
while count != nth:
number += 2
if is_prime(__lowerCAmelCase ):
count += 1
return number
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ : str = 1
@register_to_config
def __init__( self :int ,__snake_case :Any=20_00 ,__snake_case :str=0.1 ,__snake_case :List[str]=20 ,__snake_case :Dict=1E-3 ) -> List[str]:
a__ = None
a__ = None
a__ = None
def lowerCamelCase__( self :Dict ,__snake_case :List[Any] ,__snake_case :Union[str, torch.device] = None ) -> Tuple:
a__ = torch.linspace(1 ,self.config.sampling_eps ,__snake_case ,device=__snake_case )
def lowerCamelCase__( self :str ,__snake_case :str ,__snake_case :str ,__snake_case :Union[str, Any] ,__snake_case :Optional[Any]=None ) -> Union[str, Any]:
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
a__ = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
a__ = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
a__ = std.flatten()
while len(std.shape ) < len(score.shape ):
a__ = std.unsqueeze(-1 )
a__ = -score / std
# compute
a__ = -1.0 / len(self.timesteps )
a__ = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
a__ = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
a__ = beta_t.unsqueeze(-1 )
a__ = -0.5 * beta_t * x
a__ = torch.sqrt(__snake_case )
a__ = drift - diffusion**2 * score
a__ = x + drift * dt
# add noise
a__ = randn_tensor(x.shape ,layout=x.layout ,generator=__snake_case ,device=x.device ,dtype=x.dtype )
a__ = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self :Union[str, Any] ) -> Optional[int]:
return self.config.num_train_timesteps
| 657 |
import unittest
from knapsack import greedy_knapsack as kp
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
a__ = [10, 20, 30, 40, 50, 60]
a__ = [2, 4, 6, 8, 10, 12]
a__ = 1_00
self.assertEqual(kp.calc_profit(__snake_case ,__snake_case ,__snake_case ) ,2_10 )
def lowerCamelCase__( self :str ) -> Optional[int]:
self.assertRaisesRegex(__snake_case ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :Optional[Any] ) -> int:
self.assertRaisesRegex(__snake_case ,'Weight can not be negative.' )
def lowerCamelCase__( self :str ) -> List[str]:
self.assertRaisesRegex(__snake_case ,'Profit can not be negative.' )
def lowerCamelCase__( self :str ) -> Optional[Any]:
self.assertRaisesRegex(__snake_case ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :int ) -> List[Any]:
self.assertRaisesRegex(
__snake_case ,'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 657 | 1 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
snake_case : Any = logging.get_logger(__name__)
snake_case : int = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
snake_case : Any = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ):
for attribute in key.split('.' ):
a__ = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
a__ = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
a__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
a__ = value
elif weight_type == "weight_g":
a__ = value
elif weight_type == "weight_v":
a__ = value
elif weight_type == "bias":
a__ = value
else:
a__ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : Any ):
a__ = []
a__ = fairseq_model.state_dict()
a__ = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
a__ = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
a__ = True
else:
for key, mapped_key in MAPPING.items():
a__ = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
a__ = True
if "*" in mapped_key:
a__ = name.split(__lowerCAmelCase )[0].split('.' )[-2]
a__ = mapped_key.replace('*' , __lowerCAmelCase )
if "weight_g" in name:
a__ = 'weight_g'
elif "weight_v" in name:
a__ = 'weight_v'
elif "bias" in name:
a__ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a__ = 'weight'
else:
a__ = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'Unused weights: {unused_weights}' )
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : int ):
a__ = full_name.split('conv_layers.' )[-1]
a__ = name.split('.' )
a__ = int(items[0] )
a__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
a__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
a__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' )
a__ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' )
a__ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Union[str, Any]=True ):
if config_path is not None:
a__ = UniSpeechSatConfig.from_pretrained(__lowerCAmelCase )
else:
a__ = UniSpeechSatConfig()
a__ = ''
if is_finetuned:
a__ = UniSpeechSatForCTC(__lowerCAmelCase )
else:
a__ = UniSpeechSatForPreTraining(__lowerCAmelCase )
a__ , a__ , a__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
a__ = model[0].eval()
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase )
hf_wavavec.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
snake_case : Optional[int] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 657 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Any=1_0 ):
a__ = []
for _ in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=1_0 ):
a__ = []
for step in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = os.path.join(__lowerCAmelCase , 'schedule.bin' )
torch.save(scheduler.state_dict() , __lowerCAmelCase )
a__ = torch.load(__lowerCAmelCase )
scheduler.load_state_dict(__lowerCAmelCase )
return lrs
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :Union[str, Any] ) -> int:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> str:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = AdamW(params=[w] ,lr=2E-1 ,weight_decay=0.0 )
for _ in range(1_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
def lowerCamelCase__( self :Tuple ) -> int:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = Adafactor(
params=[w] ,lr=1E-2 ,eps=(1E-30, 1E-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,betaa=__snake_case ,weight_decay=0.0 ,relative_step=__snake_case ,scale_parameter=__snake_case ,warmup_init=__snake_case ,)
for _ in range(10_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
@require_torch
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : str = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
UpperCAmelCase__ : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
UpperCAmelCase__ : Optional[Any] = 1_0
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :Any=None ) -> Optional[Any]:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case ,msg=__snake_case )
def lowerCamelCase__( self :Tuple ) -> List[Any]:
a__ = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
a__ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
a__ , a__ = data
a__ = scheduler_func(self.optimizer ,**__snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 )
a__ = unwrap_schedule(__snake_case ,self.num_steps )
self.assertListAlmostEqual(
__snake_case ,__snake_case ,tol=1E-2 ,msg=F'failed for {scheduler_func} in normal scheduler' ,)
a__ = scheduler_func(self.optimizer ,**__snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__snake_case ) # wrap to test picklability of the schedule
a__ = unwrap_and_save_reload_schedule(__snake_case ,self.num_steps )
self.assertListEqual(__snake_case ,__snake_case ,msg=F'failed for {scheduler_func} in save and reload' )
class snake_case_ :
def __init__( self :Tuple ,__snake_case :str ) -> Any:
a__ = fn
def __call__( self :List[str] ,*__snake_case :Optional[Any] ,**__snake_case :Optional[int] ) -> Union[str, Any]:
return self.fn(*__snake_case ,**__snake_case )
@classmethod
def lowerCamelCase__( self :Tuple ,__snake_case :Union[str, Any] ) -> Dict:
a__ = list(map(self ,scheduler.lr_lambdas ) )
| 657 | 1 |
def __lowercase ( __lowerCAmelCase : bytes ):
return "".join([hex(__lowerCAmelCase )[2:].zfill(2 ).upper() for byte in list(__lowerCAmelCase )] )
def __lowercase ( __lowerCAmelCase : str ):
# Check data validity, following RFC3548
# https://www.ietf.org/rfc/rfc3548.txt
if (len(__lowerCAmelCase ) % 2) != 0:
raise ValueError(
'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(__lowerCAmelCase ) <= set('0123456789ABCDEF' ):
raise ValueError(
'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(__lowerCAmelCase ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 |
from __future__ import annotations
def __lowercase ( __lowerCAmelCase : list[int] ): # This function is recursive
a__ = len(__lowerCAmelCase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
a__ = array[0]
a__ = False
a__ = 1
a__ = []
while not is_found and i < array_length:
if array[i] < pivot:
a__ = True
a__ = [element for element in array[i:] if element >= array[i]]
a__ = longest_subsequence(__lowerCAmelCase )
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
a__ = temp_array
else:
i += 1
a__ = [element for element in array[1:] if element >= pivot]
a__ = [pivot, *longest_subsequence(__lowerCAmelCase )]
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
def __lowercase ( __lowerCAmelCase : str ):
return " ".join(
''.join(word[::-1] ) if len(__lowerCAmelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 657 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Dict = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = ['''pixel_values''']
def __init__( self :Optional[Any] ,__snake_case :bool = True ,__snake_case :int = 32 ,__snake_case :Union[str, Any]=PILImageResampling.BILINEAR ,__snake_case :bool = True ,**__snake_case :Tuple ,) -> None:
a__ = do_resize
a__ = do_rescale
a__ = size_divisor
a__ = resample
super().__init__(**__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :np.ndarray ,__snake_case :int ,__snake_case :Tuple ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :List[Any] ) -> np.ndarray:
a__ , a__ = get_image_size(__snake_case )
# Rounds the height and width down to the closest multiple of size_divisor
a__ = height // size_divisor * size_divisor
a__ = width // size_divisor * size_divisor
a__ = resize(__snake_case ,(new_h, new_w) ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
return image
def lowerCamelCase__( self :List[str] ,__snake_case :np.ndarray ,__snake_case :float ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :str ) -> np.ndarray:
return rescale(image=__snake_case ,scale=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :Tuple ,__snake_case :Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] ,__snake_case :Optional[bool] = None ,__snake_case :Optional[int] = None ,__snake_case :Union[str, Any]=None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[Union[TensorType, str]] = None ,__snake_case :ChannelDimension = ChannelDimension.FIRST ,**__snake_case :List[Any] ,) -> BatchFeature:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = size_divisor if size_divisor is not None else self.size_divisor
a__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
a__ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(__snake_case ) for img in images]
if do_resize:
a__ = [self.resize(__snake_case ,size_divisor=__snake_case ,resample=__snake_case ) for image in images]
if do_rescale:
a__ = [self.rescale(__snake_case ,scale=1 / 2_55 ) for image in images]
a__ = [to_channel_dimension_format(__snake_case ,__snake_case ) for image in images]
a__ = {'pixel_values': images}
return BatchFeature(data=__snake_case ,tensor_type=__snake_case )
| 657 | 1 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case_ (lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = DDIMPipeline
UpperCAmelCase__ : Dict = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
UpperCAmelCase__ : Any = PipelineTesterMixin.required_optional_params - {
'''num_images_per_prompt''',
'''latents''',
'''callback''',
'''callback_steps''',
}
UpperCAmelCase__ : List[str] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
UpperCAmelCase__ : Any = False
def lowerCamelCase__( self :List[Any] ) -> Any:
torch.manual_seed(0 )
a__ = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
a__ = DDIMScheduler()
a__ = {'unet': unet, 'scheduler': scheduler}
return components
def lowerCamelCase__( self :str ,__snake_case :int ,__snake_case :List[Any]=0 ) -> List[str]:
if str(__snake_case ).startswith('mps' ):
a__ = torch.manual_seed(__snake_case )
else:
a__ = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
a__ = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = 'cpu'
a__ = self.get_dummy_components()
a__ = self.pipeline_class(**__snake_case )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
a__ = self.get_dummy_inputs(__snake_case )
a__ = pipe(**__snake_case ).images
a__ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 32, 32, 3) )
a__ = np.array(
[1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] )
a__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__snake_case ,1E-3 )
def lowerCamelCase__( self :List[Any] ) -> Optional[int]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
super().test_save_load_local(expected_max_difference=3E-3 )
def lowerCamelCase__( self :List[Any] ) -> List[str]:
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def lowerCamelCase__( self :str ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ) -> Any:
a__ = 'google/ddpm-cifar10-32'
a__ = UNetaDModel.from_pretrained(__snake_case )
a__ = DDIMScheduler()
a__ = DDIMPipeline(unet=__snake_case ,scheduler=__snake_case )
ddim.to(__snake_case )
ddim.set_progress_bar_config(disable=__snake_case )
a__ = torch.manual_seed(0 )
a__ = ddim(generator=__snake_case ,eta=0.0 ,output_type='numpy' ).images
a__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a__ = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__( self :Tuple ) -> int:
a__ = 'google/ddpm-ema-bedroom-256'
a__ = UNetaDModel.from_pretrained(__snake_case )
a__ = DDIMScheduler.from_pretrained(__snake_case )
a__ = DDIMPipeline(unet=__snake_case ,scheduler=__snake_case )
ddpm.to(__snake_case )
ddpm.set_progress_bar_config(disable=__snake_case )
a__ = torch.manual_seed(0 )
a__ = ddpm(generator=__snake_case ,output_type='numpy' ).images
a__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
a__ = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 657 |
def __lowercase ( __lowerCAmelCase : int ):
a__ = generate_pascal_triangle(__lowerCAmelCase )
for row_idx in range(__lowerCAmelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = []
for current_row_idx in range(__lowerCAmelCase ):
a__ = populate_current_row(__lowerCAmelCase , __lowerCAmelCase )
triangle.append(__lowerCAmelCase )
return triangle
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : int ):
a__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
a__ , a__ = 1, 1
for current_col_idx in range(1 , __lowerCAmelCase ):
calculate_current_element(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return current_row
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
a__ = triangle[current_row_idx - 1][current_col_idx - 1]
a__ = triangle[current_row_idx - 1][current_col_idx]
a__ = above_to_left_elt + above_to_right_elt
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = [[1]]
for row_index in range(1 , __lowerCAmelCase ):
a__ = [0] + result[-1] + [0]
a__ = row_index + 1
# Calculate the number of distinct elements in a row
a__ = sum(divmod(__lowerCAmelCase , 2 ) )
a__ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
a__ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
a__ = row_first_half + row_second_half
result.append(__lowerCAmelCase )
return result
def __lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__lowerCAmelCase : Callable , __lowerCAmelCase : int ) -> None:
a__ = F'{func.__name__}({value})'
a__ = timeit(F'__main__.{call}' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'{call:38} -- {timing:.4f} seconds' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__lowerCAmelCase , __lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 657 | 1 |
def __lowercase ( __lowerCAmelCase : int ):
a__ = generate_pascal_triangle(__lowerCAmelCase )
for row_idx in range(__lowerCAmelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = []
for current_row_idx in range(__lowerCAmelCase ):
a__ = populate_current_row(__lowerCAmelCase , __lowerCAmelCase )
triangle.append(__lowerCAmelCase )
return triangle
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : int ):
a__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
a__ , a__ = 1, 1
for current_col_idx in range(1 , __lowerCAmelCase ):
calculate_current_element(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return current_row
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
a__ = triangle[current_row_idx - 1][current_col_idx - 1]
a__ = triangle[current_row_idx - 1][current_col_idx]
a__ = above_to_left_elt + above_to_right_elt
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = [[1]]
for row_index in range(1 , __lowerCAmelCase ):
a__ = [0] + result[-1] + [0]
a__ = row_index + 1
# Calculate the number of distinct elements in a row
a__ = sum(divmod(__lowerCAmelCase , 2 ) )
a__ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
a__ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
a__ = row_first_half + row_second_half
result.append(__lowerCAmelCase )
return result
def __lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__lowerCAmelCase : Callable , __lowerCAmelCase : int ) -> None:
a__ = F'{func.__name__}({value})'
a__ = timeit(F'__main__.{call}' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'{call:38} -- {timing:.4f} seconds' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__lowerCAmelCase , __lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 657 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
snake_case : str = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
snake_case : Tuple = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
snake_case : str = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
snake_case : Tuple = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
snake_case : int = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def __lowercase ( ):
a__ , a__ = randrange(len(__lowerCAmelCase ) ), randrange(len(__lowerCAmelCase ) )
a__ = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
a__ , a__ = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowercase ( __lowerCAmelCase : int = 1_0_0 ):
return (generate_random_hand() for _ in range(__lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
assert PokerHand(__lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
a__ = PokerHand(__lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
def __lowercase ( ):
a__ = [PokerHand(__lowerCAmelCase ) for hand in SORTED_HANDS]
a__ = poker_hands.copy()
shuffle(__lowerCAmelCase )
a__ = chain(sorted(__lowerCAmelCase ) )
for index, hand in enumerate(__lowerCAmelCase ):
assert hand == poker_hands[index]
def __lowercase ( ):
# Test that five high straights are compared correctly.
a__ = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=__lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowercase ( ):
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
a__ = PokerHand('2C 4S AS 3D 5C' )
a__ = True
a__ = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowercase ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
a__ = 0
a__ = os.path.abspath(os.path.dirname(__lowerCAmelCase ) )
a__ = os.path.join(__lowerCAmelCase , 'poker_hands.txt' )
with open(__lowerCAmelCase ) as file_hand:
for line in file_hand:
a__ = line[:1_4].strip()
a__ = line[1_5:].strip()
a__ , a__ = PokerHand(__lowerCAmelCase ), PokerHand(__lowerCAmelCase )
a__ = player.compare_with(__lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 657 | 1 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
snake_case : Optional[int] = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
snake_case : List[Any] = get_tests_dir('''fixtures/vocab.json''')
snake_case : Optional[Any] = get_tests_dir('''fixtures''')
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : Tuple = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def lowerCamelCase__( self :str ) -> Optional[Any]:
a__ = 0
def lowerCamelCase__( self :Tuple ) -> Union[str, Any]:
a__ = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Dict ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = WavaVecaConfig()
a__ = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
# save in new folder
model_config.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
a__ = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__snake_case ,os.path.join(__snake_case ,__snake_case ) )
copyfile(__snake_case ,os.path.join(__snake_case ,'vocab.json' ) )
a__ = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Dict ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = WavaVecaFeatureExtractor()
a__ = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
a__ = WavaVecaProcessor(__snake_case ,__snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in tokenizer
with open(os.path.join(__snake_case ,__snake_case ) ,'r' ) as f:
a__ = json.load(__snake_case )
config_dict.pop('processor_class' )
with open(os.path.join(__snake_case ,__snake_case ) ,'w' ) as f:
f.write(json.dumps(__snake_case ) )
a__ = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Optional[int] ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = WavaVecaFeatureExtractor()
a__ = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
a__ = WavaVecaProcessor(__snake_case ,__snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in feature extractor
with open(os.path.join(__snake_case ,__snake_case ) ,'r' ) as f:
a__ = json.load(__snake_case )
config_dict.pop('processor_class' )
with open(os.path.join(__snake_case ,__snake_case ) ,'w' ) as f:
f.write(json.dumps(__snake_case ) )
a__ = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = WavaVecaConfig(processor_class='Wav2Vec2Processor' )
model_config.save_pretrained(__snake_case )
# copy relevant files
copyfile(__snake_case ,os.path.join(__snake_case ,'vocab.json' ) )
# create emtpy sample processor
with open(os.path.join(__snake_case ,__snake_case ) ,'w' ) as f:
f.write('{}' )
a__ = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :List[str] ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
a__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
a__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=__snake_case )
a__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' ,trust_remote_code=__snake_case )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ ,'NewProcessor' )
a__ = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
a__ = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizerFast' )
# Test we can also load the slow version
a__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=__snake_case ,use_fast=__snake_case )
a__ = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ ,'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
def lowerCamelCase__( self :Tuple ) -> Any:
try:
AutoConfig.register('custom' ,__snake_case )
AutoFeatureExtractor.register(__snake_case ,__snake_case )
AutoTokenizer.register(__snake_case ,slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case ,__snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoProcessor.register(__snake_case ,__snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
a__ = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
a__ = os.path.join(__snake_case ,'vocab.txt' )
with open(__snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
a__ = CustomTokenizer(__snake_case )
a__ = CustomProcessor(__snake_case ,__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__snake_case )
a__ = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__( self :str ) -> Dict:
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : str = False
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Tuple = False
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = '''AutoFeatureExtractor'''
UpperCAmelCase__ : List[str] = '''AutoTokenizer'''
UpperCAmelCase__ : Optional[int] = False
try:
AutoConfig.register('custom' ,__snake_case )
AutoFeatureExtractor.register(__snake_case ,__snake_case )
AutoTokenizer.register(__snake_case ,slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case ,__snake_case )
# If remote code is not set, the default is to use local classes.
a__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
self.assertEqual(processor.__class__.__name__ ,'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
a__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ ,'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
a__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ ,'NewProcessor' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__( self :List[str] ) -> List[str]:
a__ = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(processor.__class__.__name__ ,'BertTokenizerFast' )
def lowerCamelCase__( self :Dict ) -> int:
a__ = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
self.assertEqual(processor.__class__.__name__ ,'ConvNextImageProcessor' )
@is_staging_test
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def lowerCamelCase__( cls :Optional[int] ) -> Tuple:
a__ = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCamelCase__( cls :Union[str, Any] ) -> List[str]:
try:
delete_repo(token=cls._token ,repo_id='test-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-processor' )
except HTTPError:
pass
def lowerCamelCase__( self :Union[str, Any] ) -> Any:
a__ = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case ,'test-processor' ) ,push_to_hub=__snake_case ,use_auth_token=self._token )
a__ = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case ,getattr(new_processor.feature_extractor ,__snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() ,processor.tokenizer.get_vocab() )
def lowerCamelCase__( self :List[Any] ) -> List[Any]:
a__ = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case ,'test-processor-org' ) ,push_to_hub=__snake_case ,use_auth_token=self._token ,organization='valid_org' ,)
a__ = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case ,getattr(new_processor.feature_extractor ,__snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() ,processor.tokenizer.get_vocab() )
def lowerCamelCase__( self :int ) -> int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
a__ = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
a__ = os.path.join(__snake_case ,'vocab.txt' )
with open(__snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
a__ = CustomTokenizer(__snake_case )
a__ = CustomProcessor(__snake_case ,__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' ,token=self._token )
a__ = Repository(__snake_case ,clone_from=F'{USER}/test-dynamic-processor' ,token=self._token )
processor.save_pretrained(__snake_case )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map ,{
'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
'AutoProcessor': 'custom_processing.CustomProcessor',
} ,)
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__snake_case ,'tokenizer_config.json' ) ) as f:
a__ = json.load(__snake_case )
self.assertDictEqual(
tokenizer_config['auto_map'] ,{
'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
'AutoProcessor': 'custom_processing.CustomProcessor',
} ,)
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__snake_case ,'custom_feature_extraction.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case ,'custom_tokenization.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case ,'custom_processing.py' ) ) )
repo.push_to_hub()
a__ = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' ,trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ ,'CustomProcessor' )
| 657 |
def __lowercase ( __lowerCAmelCase : int ):
if length <= 0 or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(__lowerCAmelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 657 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[Any] ) -> int:
a__ = 0
def lowerCamelCase__( self :List[str] ) -> Optional[int]:
a__ = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Union[str, Any] ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = Path(__snake_case ) / 'preprocessor_config.json'
a__ = Path(__snake_case ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} ,open(__snake_case ,'w' ) ,)
json.dump({'model_type': 'clip'} ,open(__snake_case ,'w' ) )
a__ = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Dict:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = Path(__snake_case ) / 'preprocessor_config.json'
a__ = Path(__snake_case ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} ,open(__snake_case ,'w' ) ,)
json.dump({'model_type': 'clip'} ,open(__snake_case ,'w' ) )
a__ = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = CLIPConfig()
# Create a dummy config file with image_proceesor_type
a__ = Path(__snake_case ) / 'preprocessor_config.json'
a__ = Path(__snake_case ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} ,open(__snake_case ,'w' ) ,)
json.dump({'model_type': 'clip'} ,open(__snake_case ,'w' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
a__ = AutoImageProcessor.from_pretrained(__snake_case ).to_dict()
config_dict.pop('image_processor_type' )
a__ = CLIPImageProcessor(**__snake_case )
# save in new folder
model_config.save_pretrained(__snake_case )
config.save_pretrained(__snake_case )
a__ = AutoImageProcessor.from_pretrained(__snake_case )
# make sure private variable is not incorrectly saved
a__ = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Optional[int] ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = Path(__snake_case ) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} ,open(__snake_case ,'w' ) ,)
a__ = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Any ) -> Any:
with self.assertRaisesRegex(
__snake_case ,'clip-base is not a local folder and is not a valid model identifier' ):
a__ = AutoImageProcessor.from_pretrained('clip-base' )
def lowerCamelCase__( self :List[str] ) -> str:
with self.assertRaisesRegex(
__snake_case ,R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
a__ = AutoImageProcessor.from_pretrained(__snake_case ,revision='aaaaaa' )
def lowerCamelCase__( self :Union[str, Any] ) -> List[Any]:
with self.assertRaisesRegex(
__snake_case ,'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' ,):
a__ = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' )
def lowerCamelCase__( self :Any ) -> Dict:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
a__ = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
a__ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' ,trust_remote_code=__snake_case )
a__ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' ,trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ ,'NewImageProcessor' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
a__ = AutoImageProcessor.from_pretrained(__snake_case ,trust_remote_code=__snake_case )
self.assertEqual(reloaded_image_processor.__class__.__name__ ,'NewImageProcessor' )
def lowerCamelCase__( self :Dict ) -> int:
try:
AutoConfig.register('custom' ,__snake_case )
AutoImageProcessor.register(__snake_case ,__snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoImageProcessor.register(__snake_case ,__snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = Path(__snake_case ) / 'preprocessor_config.json'
a__ = Path(__snake_case ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} ,open(__snake_case ,'w' ) ,)
json.dump({'model_type': 'clip'} ,open(__snake_case ,'w' ) )
a__ = CustomImageProcessor.from_pretrained(__snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
a__ = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__( self :int ) -> int:
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : List[str] = True
try:
AutoConfig.register('custom' ,__snake_case )
AutoImageProcessor.register(__snake_case ,__snake_case )
# If remote code is not set, the default is to use local
a__ = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
self.assertEqual(image_processor.__class__.__name__ ,'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
a__ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' ,trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ ,'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
a__ = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' ,trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ ,'NewImageProcessor' )
self.assertTrue(not hasattr(__snake_case ,'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 657 |
def __lowercase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ):
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError('The length of profit and weight must be same.' )
if max_weight <= 0:
raise ValueError('max_weight must greater than zero.' )
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
a__ = [p / w for p, w in zip(__lowerCAmelCase , __lowerCAmelCase )]
# Creating a copy of the list and sorting profit/weight in ascending order
a__ = sorted(__lowerCAmelCase )
# declaring useful variables
a__ = len(__lowerCAmelCase )
a__ = 0
a__ = 0
a__ = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
a__ = sorted_profit_by_weight[length - i - 1]
a__ = profit_by_weight.index(__lowerCAmelCase )
a__ = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
snake_case : Tuple = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
snake_case : Optional[int] = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
snake_case : List[str] = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 657 | 1 |
from __future__ import annotations
import math
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : bool , __lowerCAmelCase : list[int] , __lowerCAmelCase : float ):
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(__lowerCAmelCase ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , )
def __lowercase ( ):
a__ = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
a__ = math.log(len(__lowerCAmelCase ) , 2 )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 657 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Optional[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
snake_case : str = logging.get_logger(__name__)
def __lowercase ( __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Optional[Any]=None ):
return field(default_factory=lambda: default , metadata=__lowerCAmelCase )
@dataclass
class snake_case_ :
UpperCAmelCase__ : List[str] = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
UpperCAmelCase__ : List[int] = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
UpperCAmelCase__ : List[int] = list_field(
default=[8, 3_2, 1_2_8, 5_1_2] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
UpperCAmelCase__ : bool = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
UpperCAmelCase__ : bool = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
UpperCAmelCase__ : bool = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
UpperCAmelCase__ : bool = field(default=lowerCamelCase_ , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
UpperCAmelCase__ : bool = field(default=lowerCamelCase_ , metadata={'''help''': '''Benchmark training of model'''} )
UpperCAmelCase__ : bool = field(default=lowerCamelCase_ , metadata={'''help''': '''Verbose memory tracing'''} )
UpperCAmelCase__ : bool = field(
default=lowerCamelCase_ , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
UpperCAmelCase__ : bool = field(
default=lowerCamelCase_ , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
UpperCAmelCase__ : bool = field(default=lowerCamelCase_ , metadata={'''help''': '''Trace memory line by line'''} )
UpperCAmelCase__ : bool = field(default=lowerCamelCase_ , metadata={'''help''': '''Save result to a CSV file'''} )
UpperCAmelCase__ : bool = field(default=lowerCamelCase_ , metadata={'''help''': '''Save all print statements in a log file'''} )
UpperCAmelCase__ : bool = field(default=lowerCamelCase_ , metadata={'''help''': '''Whether to print environment information'''} )
UpperCAmelCase__ : bool = field(
default=lowerCamelCase_ , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
UpperCAmelCase__ : str = field(
default=f'inference_time_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
UpperCAmelCase__ : str = field(
default=f'inference_memory_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
UpperCAmelCase__ : str = field(
default=f'train_time_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
UpperCAmelCase__ : str = field(
default=f'train_memory_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
UpperCAmelCase__ : str = field(
default=f'env_info_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
UpperCAmelCase__ : str = field(
default=f'log_{round(time() )}.csv' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
UpperCAmelCase__ : int = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
UpperCAmelCase__ : bool = field(
default=lowerCamelCase_ , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
def lowerCamelCase__( self :List[str] ) -> str:
warnings.warn(
F'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils'
' are deprecated in general and it is advised to use external Benchmarking libraries '
' to benchmark Transformer models.' ,__snake_case ,)
def lowerCamelCase__( self :Dict ) -> Dict:
return json.dumps(dataclasses.asdict(self ) ,indent=2 )
@property
def lowerCamelCase__( self :List[Any] ) -> List[str]:
if len(self.models ) <= 0:
raise ValueError(
'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
' bert-base-cased` or `args.models = [\'bert-base-cased\'].' )
return self.models
@property
def lowerCamelCase__( self :Dict ) -> Optional[int]:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('Multiprocessing is currently not possible on TPU.' )
return False
else:
return True
| 657 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_input_mask
a__ = use_labels
a__ = use_mc_token_ids
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
if self.use_mc_token_ids:
a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
a__ = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
a__ = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) = config_and_inputs
a__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
a__ = self.num_labels
a__ = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__( self :int ) -> List[str]:
a__ = CTRLModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,n_embd=37 )
def lowerCamelCase__( self :str ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
def lowerCamelCase__( self :int ) -> List[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__( self :Any ) -> Dict:
a__ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(__snake_case )
a__ = torch.tensor(
[[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=__snake_case ) # Legal the president is
a__ = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
a__ = model.generate(__snake_case ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() ,__snake_case )
| 657 | 1 |
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
a__ = F'Input value of [number={number}] must be an integer'
raise TypeError(__lowerCAmelCase )
if number < 0:
return False
a__ = number * number
while number > 0:
if number % 1_0 != number_square % 1_0:
return False
number //= 1_0
number_square //= 1_0
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = 1
@register_to_config
def __init__( self :Optional[int] ,__snake_case :int = 10_00 ,__snake_case :Optional[Union[np.ndarray, List[float]]] = None ) -> int:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__snake_case )
# standard deviation of the initial noise distribution
a__ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
a__ = 4
# running values
a__ = []
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :int ,__snake_case :Union[str, torch.device] = None ) -> Union[str, Any]:
a__ = num_inference_steps
a__ = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
a__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
a__ = torch.tensor(self.config.trained_betas ,dtype=torch.floataa )
else:
a__ = torch.sin(steps * math.pi / 2 ) ** 2
a__ = (1.0 - self.betas**2) ** 0.5
a__ = (torch.atana(self.betas ,self.alphas ) / math.pi * 2)[:-1]
a__ = timesteps.to(__snake_case )
a__ = []
def lowerCamelCase__( self :Any ,__snake_case :torch.FloatTensor ,__snake_case :int ,__snake_case :torch.FloatTensor ,__snake_case :bool = True ,) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
a__ = (self.timesteps == timestep).nonzero().item()
a__ = timestep_index + 1
a__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__snake_case )
if len(self.ets ) == 1:
a__ = self.ets[-1]
elif len(self.ets ) == 2:
a__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
a__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
a__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
a__ = self._get_prev_sample(__snake_case ,__snake_case ,__snake_case ,__snake_case )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :torch.FloatTensor ,*__snake_case :int ,**__snake_case :Optional[int] ) -> torch.FloatTensor:
return sample
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[Any] ,__snake_case :Optional[int] ,__snake_case :Dict ,__snake_case :Any ) -> Optional[Any]:
a__ = self.alphas[timestep_index]
a__ = self.betas[timestep_index]
a__ = self.alphas[prev_timestep_index]
a__ = self.betas[prev_timestep_index]
a__ = (sample - sigma * ets) / max(__snake_case ,1E-8 )
a__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self :Any ) -> Union[str, Any]:
return self.config.num_train_timesteps
| 657 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case : Optional[int] = logging.get_logger(__name__)
snake_case : Any = {'''vocab_file''': '''spiece.model'''}
snake_case : Optional[Any] = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
snake_case : Any = {'''bert_for_seq_generation''': 5_12}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : List[str] = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : str = ['''input_ids''', '''attention_mask''']
def __init__( self :Tuple ,__snake_case :List[str] ,__snake_case :int="<s>" ,__snake_case :Tuple="</s>" ,__snake_case :List[Any]="<unk>" ,__snake_case :Optional[Any]="<pad>" ,__snake_case :Union[str, Any]="<::::>" ,__snake_case :Optional[Dict[str, Any]] = None ,**__snake_case :List[Any] ,) -> None:
a__ = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=__snake_case ,eos_token=__snake_case ,unk_token=__snake_case ,pad_token=__snake_case ,sep_token=__snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**__snake_case ,)
a__ = vocab_file
a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__snake_case )
@property
def lowerCamelCase__( self :Dict ) -> Dict:
return self.sp_model.get_piece_size()
def lowerCamelCase__( self :Any ) -> Optional[int]:
a__ = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self :str ) -> str:
a__ = self.__dict__.copy()
a__ = None
return state
def __setstate__( self :Optional[int] ,__snake_case :str ) -> Any:
a__ = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
a__ = {}
a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__( self :List[Any] ,__snake_case :str ) -> List[str]:
return self.sp_model.encode(__snake_case ,out_type=__snake_case )
def lowerCamelCase__( self :Any ,__snake_case :Optional[Any] ) -> int:
return self.sp_model.piece_to_id(__snake_case )
def lowerCamelCase__( self :Any ,__snake_case :List[str] ) -> Tuple:
a__ = self.sp_model.IdToPiece(__snake_case )
return token
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Tuple ) -> List[str]:
a__ = []
a__ = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__snake_case ) + token
a__ = []
else:
current_sub_tokens.append(__snake_case )
out_string += self.sp_model.decode(__snake_case )
return out_string.strip()
def lowerCamelCase__( self :Any ,__snake_case :str ,__snake_case :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ = os.path.join(
__snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case ,'wb' ) as fi:
a__ = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
| 657 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case : Any = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = ['''MobileViTFeatureExtractor''']
snake_case : int = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Tuple = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
snake_case : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
snake_case : int = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
snake_case : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
snake_case : Dict = logging.get_logger(__name__)
snake_case : Any = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
if tokenizer_name is None:
a__ = TOKENIZER_CLASSES
else:
a__ = {tokenizer_name: getattr(__lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
for tokenizer_name in tokenizer_names:
a__ = TOKENIZER_CLASSES[tokenizer_name]
a__ = True
if checkpoint_name is None:
a__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
a__ = [checkpoint_name]
logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
for checkpoint in checkpoint_names:
logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
# Load tokenizer
a__ = tokenizer_class.from_pretrained(__lowerCAmelCase , force_download=__lowerCAmelCase )
# Save fast tokenizer
logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
# For organization names we create sub-directories
if "/" in checkpoint:
a__ , a__ = checkpoint.split('/' )
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
elif add_prefix:
a__ = checkpoint
a__ = dump_path
else:
a__ = None
a__ = dump_path
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
a__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
a__ = file_path.split(__lowerCAmelCase )[-1][0]
if next_char == "/":
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
a__ = None
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
a__ = tokenizer.save_pretrained(
__lowerCAmelCase , legacy_format=__lowerCAmelCase , filename_prefix=__lowerCAmelCase )
logger.info(F'=> File names {file_names}' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(__lowerCAmelCase )
logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
snake_case : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 657 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __lowercase ( ):
a__ = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'
a__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ).convert('RGB' )
return image
def __lowercase ( __lowerCAmelCase : Optional[int] ):
a__ = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
a__ = dct.pop(__lowerCAmelCase )
a__ = val
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
a__ = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' )
a__ = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
a__ = torch.cat((q_bias, torch.zeros_like(__lowerCAmelCase , requires_grad=__lowerCAmelCase ), v_bias) )
a__ = qkv_bias
def __lowercase ( __lowerCAmelCase : Tuple ):
a__ = 3_6_4 if 'coco' in model_name else 2_2_4
a__ = InstructBlipVisionConfig(image_size=__lowerCAmelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
a__ = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
a__ = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
a__ = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=3_2_0_0_1 ).to_dict()
elif "vicuna-13b" in model_name:
a__ = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=3_2_0_0_1 ).to_dict()
else:
raise ValueError('Model name not supported' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
a__ = InstructBlipQFormerConfig(vocab_size=3_0_5_2_3 ).to_dict()
a__ = InstructBlipConfig(vision_config=__lowerCAmelCase , text_config=__lowerCAmelCase , qformer_config=__lowerCAmelCase )
return config, image_size
@torch.no_grad()
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple=None , __lowerCAmelCase : Optional[Any]=False ):
a__ = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' )
qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} )
if "t5" in model_name:
a__ = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
a__ = LlamaTokenizerFast.from_pretrained(
'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' )
tokenizer.add_special_tokens({'pad_token': '[PAD]'} )
a__ , a__ = get_blipa_config(__lowerCAmelCase )
a__ = InstructBlipForConditionalGeneration(__lowerCAmelCase ).eval()
a__ = {
'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'),
'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'),
'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'),
'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'),
}
a__ , a__ = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
a__ = 'cuda:1' if torch.cuda.is_available() else 'cpu'
a__ = 'cuda:2' if torch.cuda.is_available() else 'cpu'
a__ , a__ , a__ = load_model_and_preprocess(
name=__lowerCAmelCase , model_type=__lowerCAmelCase , is_eval=__lowerCAmelCase , device=__lowerCAmelCase )
original_model.eval()
print('Done!' )
# update state dict keys
a__ = original_model.state_dict()
a__ = create_rename_keys(__lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
a__ = state_dict.pop(__lowerCAmelCase )
if key.startswith('Qformer.bert' ):
a__ = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
a__ = key.replace('self' , 'attention' )
if "llm_proj" in key:
a__ = key.replace('llm_proj' , 'language_projection' )
if "t5_proj" in key:
a__ = key.replace('t5_proj' , 'language_projection' )
if key.startswith('llm_model' ):
a__ = key.replace('llm_model' , 'language_model' )
if key.startswith('t5' ):
a__ = key.replace('t5' , 'language' )
a__ = val
# read in qv biases
read_in_q_v_bias(__lowerCAmelCase , __lowerCAmelCase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
a__ = load_demo_image()
a__ = 'What is unusual about this image?'
# create processor
a__ = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=__lowerCAmelCase , image_std=__lowerCAmelCase )
a__ = InstructBlipProcessor(
image_processor=__lowerCAmelCase , tokenizer=__lowerCAmelCase , qformer_tokenizer=__lowerCAmelCase , )
a__ = processor(images=__lowerCAmelCase , text=__lowerCAmelCase , return_tensors='pt' ).to(__lowerCAmelCase )
# make sure processor creates exact same pixel values
a__ = vis_processors['eval'](__lowerCAmelCase ).unsqueeze(0 ).to(__lowerCAmelCase )
a__ = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __lowerCAmelCase )
original_model.to(__lowerCAmelCase )
hf_model.to(__lowerCAmelCase )
with torch.no_grad():
if "vicuna" in model_name:
a__ = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits
a__ = hf_model(**__lowerCAmelCase ).logits
else:
a__ = original_model(
{'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits
a__ = tokenizer('\n' , return_tensors='pt' ).input_ids.to(__lowerCAmelCase )
a__ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_0_0 )
a__ = hf_model(**__lowerCAmelCase , labels=__lowerCAmelCase ).logits
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
a__ = 1E-4 if 'vicuna' in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) , __lowerCAmelCase , atol=__lowerCAmelCase )
print('Looks ok!' )
print('Generating with original model...' )
a__ = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('Generating with HF model...' )
a__ = hf_model.generate(
**__lowerCAmelCase , do_sample=__lowerCAmelCase , num_beams=5 , max_length=2_5_6 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
a__ = 2
print('Original generation:' , __lowerCAmelCase )
a__ = processor.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
a__ = [text.strip() for text in output_text]
print('HF generation:' , __lowerCAmelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowerCAmelCase )
hf_model.save_pretrained(__lowerCAmelCase )
if push_to_hub:
processor.push_to_hub(F'Salesforce/{model_name}' )
hf_model.push_to_hub(F'Salesforce/{model_name}' )
if __name__ == "__main__":
snake_case : Tuple = argparse.ArgumentParser()
snake_case : Optional[int] = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
snake_case : Any = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 657 |
from math import ceil, sqrt
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_0_0_0 ):
a__ = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
a__ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
a__ = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 1 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ : str = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias''']
@register_to_config
def __init__( self :Dict ,__snake_case :int ,__snake_case :int ,__snake_case :Optional[int] = None ,__snake_case :int = 5_02_57 ,__snake_case :int = 10_24 ,__snake_case :int = 7_68 ,__snake_case :int = 12 ,__snake_case :int = 12 ,__snake_case :Optional[int] = None ,__snake_case :str = "gelu_new" ,__snake_case :float = 0.1 ,__snake_case :float = 0.1 ,__snake_case :float = 0.1 ,__snake_case :float = 1E-5 ,__snake_case :float = 0.02 ,__snake_case :bool = True ,__snake_case :bool = True ,__snake_case :bool = False ,__snake_case :bool = False ,) -> int:
super().__init__()
a__ = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'
F' `n_embd`: {n_embd} are not equal.' )
a__ = prefix_inner_dim
a__ = prefix_hidden_dim
a__ = (
nn.Linear(self.prefix_inner_dim ,self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
a__ = (
nn.Linear(self.prefix_hidden_dim ,__snake_case ) if self.prefix_hidden_dim is not None else nn.Identity()
)
a__ = GPTaConfig(
vocab_size=__snake_case ,n_positions=__snake_case ,n_embd=__snake_case ,n_layer=__snake_case ,n_head=__snake_case ,n_inner=__snake_case ,activation_function=__snake_case ,resid_pdrop=__snake_case ,embd_pdrop=__snake_case ,attn_pdrop=__snake_case ,layer_norm_epsilon=__snake_case ,initializer_range=__snake_case ,scale_attn_weights=__snake_case ,use_cache=__snake_case ,scale_attn_by_inverse_layer_idx=__snake_case ,reorder_and_upcast_attn=__snake_case ,)
a__ = GPTaLMHeadModel(__snake_case )
def lowerCamelCase__( self :List[Any] ,__snake_case :torch.Tensor ,__snake_case :torch.Tensor ,__snake_case :Optional[torch.Tensor] = None ,__snake_case :Optional[torch.Tensor] = None ,) -> Union[str, Any]:
a__ = self.transformer.transformer.wte(__snake_case )
a__ = self.encode_prefix(__snake_case )
a__ = self.decode_prefix(__snake_case )
a__ = torch.cat((prefix_embeds, embedding_text) ,dim=1 )
if labels is not None:
a__ = self.get_dummy_token(input_ids.shape[0] ,input_ids.device )
a__ = torch.cat((dummy_token, input_ids) ,dim=1 )
a__ = self.transformer(inputs_embeds=__snake_case ,labels=__snake_case ,attention_mask=__snake_case )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowerCamelCase__( self :List[Any] ,__snake_case :int ,__snake_case :torch.device ) -> torch.Tensor:
return torch.zeros(__snake_case ,self.prefix_length ,dtype=torch.intaa ,device=__snake_case )
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[Any] ) -> Tuple:
return self.encode_prefix(__snake_case )
@torch.no_grad()
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :Any ) -> Dict:
a__ = torch.split(__snake_case ,1 ,dim=0 )
a__ = []
a__ = []
for feature in features:
a__ = self.decode_prefix(feature.to(__snake_case ) ) # back to the clip feature
# Only support beam search for now
a__ , a__ = self.generate_beam(
input_embeds=__snake_case ,device=__snake_case ,eos_token_id=__snake_case )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
a__ = torch.stack(__snake_case )
a__ = torch.stack(__snake_case )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def lowerCamelCase__( self :Any ,__snake_case :List[Any]=None ,__snake_case :Tuple=None ,__snake_case :Optional[int]=None ,__snake_case :int = 5 ,__snake_case :int = 67 ,__snake_case :float = 1.0 ,__snake_case :Optional[int] = None ,) -> List[str]:
a__ = eos_token_id
a__ = None
a__ = None
a__ = torch.ones(__snake_case ,device=__snake_case ,dtype=torch.int )
a__ = torch.zeros(__snake_case ,device=__snake_case ,dtype=torch.bool )
if input_embeds is not None:
a__ = input_embeds
else:
a__ = self.transformer.transformer.wte(__snake_case )
for i in range(__snake_case ):
a__ = self.transformer(inputs_embeds=__snake_case )
a__ = outputs.logits
a__ = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
a__ = logits.softmax(-1 ).log()
if scores is None:
a__ , a__ = logits.topk(__snake_case ,-1 )
a__ = generated.expand(__snake_case ,*generated.shape[1:] )
a__ , a__ = next_tokens.permute(1 ,0 ), scores.squeeze(0 )
if tokens is None:
a__ = next_tokens
else:
a__ = tokens.expand(__snake_case ,*tokens.shape[1:] )
a__ = torch.cat((tokens, next_tokens) ,dim=1 )
else:
a__ = -float(np.inf )
a__ = 0
a__ = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
a__ = scores_sum / seq_lengths[:, None]
a__ , a__ = scores_sum_average.view(-1 ).topk(__snake_case ,-1 )
a__ = next_tokens // scores_sum.shape[1]
a__ = seq_lengths[next_tokens_source]
a__ = next_tokens % scores_sum.shape[1]
a__ = next_tokens.unsqueeze(1 )
a__ = tokens[next_tokens_source]
a__ = torch.cat((tokens, next_tokens) ,dim=1 )
a__ = generated[next_tokens_source]
a__ = scores_sum_average * seq_lengths
a__ = is_stopped[next_tokens_source]
a__ = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] ,1 ,-1 )
a__ = torch.cat((generated, next_token_embed) ,dim=1 )
a__ = is_stopped + next_tokens.eq(__snake_case ).squeeze()
if is_stopped.all():
break
a__ = scores / seq_lengths
a__ = scores.argsort(descending=__snake_case )
# tokens tensors are already padded to max_seq_length
a__ = [tokens[i] for i in order]
a__ = torch.stack(__snake_case ,dim=0 )
a__ = torch.tensor([seq_lengths[i] for i in order] ,dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 657 |
from sklearn.metrics import fa_score
import datasets
snake_case : Optional[int] = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
snake_case : List[Any] = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
snake_case : Union[str, Any] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
def lowerCamelCase__( self :Any ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,)
def lowerCamelCase__( self :Dict ,__snake_case :str ,__snake_case :str ,__snake_case :Dict=None ,__snake_case :str=1 ,__snake_case :Optional[int]="binary" ,__snake_case :Union[str, Any]=None ) -> Tuple:
a__ = fa_score(
__snake_case ,__snake_case ,labels=__snake_case ,pos_label=__snake_case ,average=__snake_case ,sample_weight=__snake_case )
return {"f1": float(__snake_case ) if score.size == 1 else score}
| 657 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : List[Any] = logging.get_logger(__name__)
snake_case : Any = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Optional[int] = '''time_series_transformer'''
UpperCAmelCase__ : Optional[int] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self :Any ,__snake_case :Optional[int] = None ,__snake_case :Optional[int] = None ,__snake_case :str = "student_t" ,__snake_case :str = "nll" ,__snake_case :int = 1 ,__snake_case :List[int] = [1, 2, 3, 4, 5, 6, 7] ,__snake_case :Optional[Union[str, bool]] = "mean" ,__snake_case :int = 0 ,__snake_case :int = 0 ,__snake_case :int = 0 ,__snake_case :int = 0 ,__snake_case :Optional[List[int]] = None ,__snake_case :Optional[List[int]] = None ,__snake_case :int = 32 ,__snake_case :int = 32 ,__snake_case :int = 2 ,__snake_case :int = 2 ,__snake_case :int = 2 ,__snake_case :int = 2 ,__snake_case :bool = True ,__snake_case :str = "gelu" ,__snake_case :int = 64 ,__snake_case :float = 0.1 ,__snake_case :float = 0.1 ,__snake_case :float = 0.1 ,__snake_case :float = 0.1 ,__snake_case :float = 0.1 ,__snake_case :int = 1_00 ,__snake_case :float = 0.02 ,__snake_case :Optional[int]=True ,**__snake_case :Optional[Any] ,) -> str:
# time series specific configuration
a__ = prediction_length
a__ = context_length or prediction_length
a__ = distribution_output
a__ = loss
a__ = input_size
a__ = num_time_features
a__ = lags_sequence
a__ = scaling
a__ = num_dynamic_real_features
a__ = num_static_real_features
a__ = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__snake_case ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
a__ = cardinality
else:
a__ = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__snake_case ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
a__ = embedding_dimension
else:
a__ = [min(50 ,(cat + 1) // 2 ) for cat in self.cardinality]
a__ = num_parallel_samples
# Transformer architecture configuration
a__ = input_size * len(__snake_case ) + self._number_of_features
a__ = d_model
a__ = encoder_attention_heads
a__ = decoder_attention_heads
a__ = encoder_ffn_dim
a__ = decoder_ffn_dim
a__ = encoder_layers
a__ = decoder_layers
a__ = dropout
a__ = attention_dropout
a__ = activation_dropout
a__ = encoder_layerdrop
a__ = decoder_layerdrop
a__ = activation_function
a__ = init_std
a__ = use_cache
super().__init__(is_encoder_decoder=__snake_case ,**__snake_case )
@property
def lowerCamelCase__( self :Dict ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 657 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
snake_case : Any = logging.get_logger(__name__)
snake_case : Tuple = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class snake_case_ (lowerCamelCase_ ):
def __init__( self :str ,__snake_case :Dict=None ,__snake_case :int=None ,*__snake_case :str ,**__snake_case :Union[str, Any] ) -> Tuple:
super().__init__(*__snake_case ,**__snake_case )
if config is None:
assert isinstance(self.model ,__snake_case ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F' {self.model.__class__}'
)
a__ = self.model.config
else:
a__ = config
a__ = data_args
a__ = self.config.tgt_vocab_size if isinstance(self.config ,__snake_case ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
' padding..' )
if self.args.label_smoothing == 0:
a__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
a__ = label_smoothed_nll_loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :int ) -> Tuple:
if self.optimizer is None:
a__ = ['bias', 'LayerNorm.weight']
a__ = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
a__ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
a__ = Adafactor
a__ = {'scale_parameter': False, 'relative_step': False}
else:
a__ = AdamW
a__ = {
'betas': (self.args.adam_betaa, self.args.adam_betaa),
'eps': self.args.adam_epsilon,
}
a__ = self.args.learning_rate
if self.sharded_ddp:
a__ = OSS(
params=__snake_case ,optim=__snake_case ,**__snake_case ,)
else:
a__ = optimizer_cls(__snake_case ,**__snake_case )
if self.lr_scheduler is None:
a__ = self._get_lr_scheduler(__snake_case )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def lowerCamelCase__( self :Dict ,__snake_case :List[str] ) -> Union[str, Any]:
a__ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
a__ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
a__ = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
a__ = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=__snake_case )
return scheduler
def lowerCamelCase__( self :Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCamelCase__( self :str ,__snake_case :Optional[int] ,__snake_case :List[Any] ,__snake_case :Any ) -> Optional[Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute usual loss via models
a__ , a__ = model(**__snake_case ,labels=__snake_case ,use_cache=__snake_case )[:2]
else:
# compute label smoothed loss
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = torch.nn.functional.log_softmax(__snake_case ,dim=-1 )
a__ , a__ = self.loss_fn(__snake_case ,__snake_case ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :Optional[int] ) -> Any:
a__ = inputs.pop('labels' )
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
return loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :nn.Module ,__snake_case :Dict[str, Union[torch.Tensor, Any]] ,__snake_case :bool ,__snake_case :Optional[List[str]] = None ,) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
a__ = self._prepare_inputs(__snake_case )
a__ = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
a__ = self.model.generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**__snake_case ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
a__ = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
a__ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
a__ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
return (loss, logits, labels)
def lowerCamelCase__( self :List[str] ,__snake_case :Optional[Any] ,__snake_case :Union[str, Any] ) -> int:
# If PAD token is not defined at least EOS token has to be defined
a__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F' padded to `max_length`={max_length}' )
a__ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
a__ = tensor
return padded_tensor
| 657 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : List[str] = logging.get_logger(__name__)
snake_case : Union[str, Any] = {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/config.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/config.json'''
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = '''fnet'''
def __init__( self :str ,__snake_case :List[str]=3_20_00 ,__snake_case :Union[str, Any]=7_68 ,__snake_case :List[Any]=12 ,__snake_case :Tuple=30_72 ,__snake_case :List[Any]="gelu_new" ,__snake_case :int=0.1 ,__snake_case :int=5_12 ,__snake_case :Dict=4 ,__snake_case :Dict=0.02 ,__snake_case :Tuple=1E-12 ,__snake_case :List[str]=False ,__snake_case :Any=5_12 ,__snake_case :List[Any]=3 ,__snake_case :Dict=1 ,__snake_case :Optional[int]=2 ,**__snake_case :Optional[Any] ,) -> List[Any]:
super().__init__(pad_token_id=__snake_case ,bos_token_id=__snake_case ,eos_token_id=__snake_case ,**__snake_case )
a__ = vocab_size
a__ = max_position_embeddings
a__ = hidden_size
a__ = num_hidden_layers
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = initializer_range
a__ = type_vocab_size
a__ = layer_norm_eps
a__ = use_tpu_fourier_optimizations
a__ = tpu_short_seq_length
| 657 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
snake_case : Dict = '''
Human: <<task>>
Assistant: '''
snake_case : Optional[int] = '''huggingface-tools/default-prompts'''
snake_case : Tuple = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any="run" ):
if prompt_or_repo_id is None:
a__ = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , __lowerCAmelCase ) is not None:
return prompt_or_repo_id
a__ = cached_file(
__lowerCAmelCase , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(__lowerCAmelCase , 'r' , encoding='utf-8' ) as f:
return f.read()
| 657 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
snake_case : Optional[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
snake_case : Dict = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] ):
a__ = state_dict.pop(__lowerCAmelCase )
a__ = val
def __lowercase ( __lowerCAmelCase : Optional[Any] ):
a__ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
a__ = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
a__ = value
else:
a__ = value
return new_state_dict
def __lowercase ( __lowerCAmelCase : List[str] ):
a__ = ''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
a__ = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
a__ = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
a__ = in_proj_weight[:2_5_6, :]
a__ = in_proj_bias[:2_5_6]
a__ = in_proj_weight[2_5_6:5_1_2, :]
a__ = in_proj_bias[2_5_6:5_1_2]
a__ = in_proj_weight[-2_5_6:, :]
a__ = in_proj_bias[-2_5_6:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
a__ = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
a__ = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
a__ = in_proj_weight[:2_5_6, :]
a__ = in_proj_bias[:2_5_6]
a__ = in_proj_weight[2_5_6:5_1_2, :]
a__ = in_proj_bias[2_5_6:5_1_2]
a__ = in_proj_weight[-2_5_6:, :]
a__ = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
a__ = state_dict.pop(
F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
a__ = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
a__ = in_proj_weight_cross_attn[:2_5_6, :]
a__ = in_proj_bias_cross_attn[:2_5_6]
a__ = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
a__ = in_proj_bias_cross_attn[2_5_6:5_1_2]
a__ = in_proj_weight_cross_attn[-2_5_6:, :]
a__ = in_proj_bias_cross_attn[-2_5_6:]
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str ):
a__ , a__ = image.size
a__ = max(__lowerCAmelCase , __lowerCAmelCase )
a__ = 8_0_0 if 'detection' in checkpoint_url else 1_0_0_0
a__ = target_max_size / current_max_size
a__ = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def __lowercase ( __lowerCAmelCase : str ):
a__ = F.to_tensor(__lowerCAmelCase )
a__ = F.normalize(__lowerCAmelCase , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] ):
logger.info('Converting model...' )
# load original state dict
a__ = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
a__ = rename_backbone_keys(__lowerCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowerCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
a__ = 'model.'
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
a__ = state_dict.pop(__lowerCAmelCase )
a__ = val
# create HuggingFace model and load state dict
a__ = TableTransformerConfig(
backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
a__ = 1_5
a__ = 2
a__ = {0: 'table', 1: 'table rotated'}
a__ = idalabel
a__ = {v: k for k, v in idalabel.items()}
else:
a__ = 1_2_5
a__ = 6
a__ = {
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
a__ = idalabel
a__ = {v: k for k, v in idalabel.items()}
a__ = DetrImageProcessor(
format='coco_detection' , max_size=8_0_0 if 'detection' in checkpoint_url else 1_0_0_0 )
a__ = TableTransformerForObjectDetection(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# verify our conversion
a__ = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
a__ = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=__lowerCAmelCase )
a__ = Image.open(__lowerCAmelCase ).convert('RGB' )
a__ = normalize(resize(__lowerCAmelCase , __lowerCAmelCase ) ).unsqueeze(0 )
a__ = model(__lowerCAmelCase )
if "detection" in checkpoint_url:
a__ = (1, 1_5, 3)
a__ = torch.tensor(
[[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
a__ = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
else:
a__ = (1, 1_2_5, 7)
a__ = torch.tensor(
[[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
a__ = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __lowerCAmelCase , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __lowerCAmelCase , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
a__ = (
'microsoft/table-transformer-detection'
if 'detection' in checkpoint_url
else 'microsoft/table-transformer-structure-recognition'
)
model.push_to_hub(__lowerCAmelCase )
image_processor.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
snake_case : Any = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
snake_case : Tuple = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 657 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
a__ = precision
a__ = ceil(precision / 1_4 )
a__ = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
a__ = 1
a__ = 1_3_5_9_1_4_0_9
a__ = Decimal(__lowerCAmelCase )
for k in range(1 , __lowerCAmelCase ):
a__ = factorial(6 * k ) // (factorial(3 * k ) * factorial(__lowerCAmelCase ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
snake_case : Tuple = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 657 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Optional[int] ) -> Tuple:
a__ = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
a__ = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
sd_pipe.set_scheduler('sample_euler' )
a__ = 'A painting of a squirrel eating a burger'
a__ = torch.manual_seed(0 )
a__ = sd_pipe([prompt] ,generator=__snake_case ,guidance_scale=9.0 ,num_inference_steps=20 ,output_type='np' )
a__ = output.images
a__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
a__ = np.array([0.04_47, 0.04_92, 0.04_68, 0.04_08, 0.03_83, 0.04_08, 0.03_54, 0.03_80, 0.03_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__( self :str ) -> Any:
a__ = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
a__ = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
sd_pipe.set_scheduler('sample_euler' )
a__ = 'A painting of a squirrel eating a burger'
a__ = torch.manual_seed(0 )
a__ = sd_pipe([prompt] ,generator=__snake_case ,guidance_scale=9.0 ,num_inference_steps=20 ,output_type='np' )
a__ = output.images
a__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
a__ = np.array([0.12_37, 0.13_20, 0.14_38, 0.13_59, 0.13_90, 0.11_32, 0.12_77, 0.11_75, 0.11_12] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def lowerCamelCase__( self :Dict ) -> Tuple:
a__ = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
a__ = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
a__ = 'A painting of a squirrel eating a burger'
a__ = torch.manual_seed(0 )
a__ = sd_pipe(
[prompt] ,generator=__snake_case ,guidance_scale=7.5 ,num_inference_steps=15 ,output_type='np' ,use_karras_sigmas=__snake_case ,)
a__ = output.images
a__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
a__ = np.array(
[0.11_38_16_89, 0.12_11_29_21, 0.1_38_94_57, 0.12_54_96_06, 0.1_24_49_64, 0.10_83_15_17, 0.11_56_28_66, 0.10_86_78_16, 0.10_49_90_48] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 657 |
def __lowercase ( __lowerCAmelCase : int = 2_0_0 ):
a__ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
a__ = [0] * (pence + 1)
a__ = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(__lowerCAmelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
| 657 | 1 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ):
# Initialise PyTorch model
a__ = AlbertConfig.from_json_file(__lowerCAmelCase )
print(F'Building PyTorch model from configuration: {config}' )
a__ = AlbertForPreTraining(__lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_albert(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , __lowerCAmelCase )
if __name__ == "__main__":
snake_case : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
snake_case : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 657 |
from manim import *
class snake_case_ (lowerCamelCase_ ):
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
a__ = Rectangle(height=0.5 ,width=0.5 )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
a__ = Rectangle(height=0.25 ,width=0.25 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('CPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('GPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Model' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
a__ = []
a__ = []
for i, rect in enumerate(__snake_case ):
a__ = fill.copy().set_fill(__snake_case ,opacity=0.8 )
target.move_to(__snake_case )
model_arr.append(__snake_case )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__snake_case ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case ,*__snake_case )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Disk' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
disk.move_to([-4, -1.25, 0] )
self.add(__snake_case ,__snake_case )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case ,__snake_case )
a__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(__snake_case ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__snake_case )
a__ = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) )
a__ = Square(0.3 )
input.set_fill(__snake_case ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__snake_case ,buff=0.5 )
self.play(Write(__snake_case ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__snake_case ,buff=0.02 )
self.play(MoveToTarget(__snake_case ) )
self.play(FadeOut(__snake_case ) )
a__ = Arrow(start=__snake_case ,end=__snake_case ,color=__snake_case ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__snake_case ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a__ = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) )
a__ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__snake_case ) ,Circumscribe(model_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_cpu_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
a__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__snake_case ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
a__ = AnimationGroup(
FadeOut(__snake_case ,run_time=0.5 ) ,MoveToTarget(__snake_case ,run_time=0.5 ) ,FadeIn(__snake_case ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__snake_case )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a__ = 0.7
self.play(
Circumscribe(model_arr[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_arr[i + 1] ,color=__snake_case ,**__snake_case ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(cpu_left_col_base[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
a__ = a_c
a__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__snake_case ) ,FadeOut(__snake_case ,run_time=0.5 ) ,)
a__ = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) ,MoveToTarget(__snake_case ) )
self.wait()
| 657 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
snake_case : str = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = ['''PoolFormerFeatureExtractor''']
snake_case : Optional[int] = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
snake_case : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 657 |
from math import pi
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
| 657 | 1 |
class snake_case_ : # Public class to implement a graph
def __init__( self :Union[str, Any] ,__snake_case :int ,__snake_case :int ,__snake_case :list[list[bool]] ) -> None:
a__ = row
a__ = col
a__ = graph
def lowerCamelCase__( self :List[Any] ,__snake_case :int ,__snake_case :int ,__snake_case :list[list[bool]] ) -> bool:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def lowerCamelCase__( self :str ,__snake_case :int ,__snake_case :int ,__snake_case :list[list[bool]] ) -> None:
# Checking all 8 elements surrounding nth element
a__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
a__ = [-1, 0, 1, -1, 1, -1, 0, 1]
a__ = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] ,j + col_nbr[k] ,__snake_case ):
self.diffs(i + row_nbr[k] ,j + col_nbr[k] ,__snake_case )
def lowerCamelCase__( self :Any ) -> int: # And finally, count all islands.
a__ = [[False for j in range(self.COL )] for i in range(self.ROW )]
a__ = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(__snake_case ,__snake_case ,__snake_case )
count += 1
return count
| 657 |
from math import sqrt
def __lowercase ( __lowerCAmelCase : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_1 ):
a__ = 0
a__ = 1
while count != nth and number < 3:
number += 1
if is_prime(__lowerCAmelCase ):
count += 1
while count != nth:
number += 2
if is_prime(__lowerCAmelCase ):
count += 1
return number
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 1 |
from ..utils import DummyObject, requires_backends
class snake_case_ (metaclass=lowerCamelCase_ ):
UpperCAmelCase__ : List[str] = ['''speech''']
def __init__( self :Any ,*__snake_case :Dict ,**__snake_case :List[str] ) -> Dict:
requires_backends(self ,['speech'] )
class snake_case_ (metaclass=lowerCamelCase_ ):
UpperCAmelCase__ : Dict = ['''speech''']
def __init__( self :Optional[Any] ,*__snake_case :List[Any] ,**__snake_case :Optional[Any] ) -> Union[str, Any]:
requires_backends(self ,['speech'] )
| 657 |
import unittest
from knapsack import greedy_knapsack as kp
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
a__ = [10, 20, 30, 40, 50, 60]
a__ = [2, 4, 6, 8, 10, 12]
a__ = 1_00
self.assertEqual(kp.calc_profit(__snake_case ,__snake_case ,__snake_case ) ,2_10 )
def lowerCamelCase__( self :str ) -> Optional[int]:
self.assertRaisesRegex(__snake_case ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :Optional[Any] ) -> int:
self.assertRaisesRegex(__snake_case ,'Weight can not be negative.' )
def lowerCamelCase__( self :str ) -> List[str]:
self.assertRaisesRegex(__snake_case ,'Profit can not be negative.' )
def lowerCamelCase__( self :str ) -> Optional[Any]:
self.assertRaisesRegex(__snake_case ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :int ) -> List[Any]:
self.assertRaisesRegex(
__snake_case ,'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 657 | 1 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
a__ = precision
a__ = ceil(precision / 1_4 )
a__ = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
a__ = 1
a__ = 1_3_5_9_1_4_0_9
a__ = Decimal(__lowerCAmelCase )
for k in range(1 , __lowerCAmelCase ):
a__ = factorial(6 * k ) // (factorial(3 * k ) * factorial(__lowerCAmelCase ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
snake_case : Tuple = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 657 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Any=1_0 ):
a__ = []
for _ in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=1_0 ):
a__ = []
for step in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = os.path.join(__lowerCAmelCase , 'schedule.bin' )
torch.save(scheduler.state_dict() , __lowerCAmelCase )
a__ = torch.load(__lowerCAmelCase )
scheduler.load_state_dict(__lowerCAmelCase )
return lrs
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :Union[str, Any] ) -> int:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> str:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = AdamW(params=[w] ,lr=2E-1 ,weight_decay=0.0 )
for _ in range(1_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
def lowerCamelCase__( self :Tuple ) -> int:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = Adafactor(
params=[w] ,lr=1E-2 ,eps=(1E-30, 1E-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,betaa=__snake_case ,weight_decay=0.0 ,relative_step=__snake_case ,scale_parameter=__snake_case ,warmup_init=__snake_case ,)
for _ in range(10_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
@require_torch
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : str = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
UpperCAmelCase__ : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
UpperCAmelCase__ : Optional[Any] = 1_0
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :Any=None ) -> Optional[Any]:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case ,msg=__snake_case )
def lowerCamelCase__( self :Tuple ) -> List[Any]:
a__ = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
a__ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
a__ , a__ = data
a__ = scheduler_func(self.optimizer ,**__snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 )
a__ = unwrap_schedule(__snake_case ,self.num_steps )
self.assertListAlmostEqual(
__snake_case ,__snake_case ,tol=1E-2 ,msg=F'failed for {scheduler_func} in normal scheduler' ,)
a__ = scheduler_func(self.optimizer ,**__snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__snake_case ) # wrap to test picklability of the schedule
a__ = unwrap_and_save_reload_schedule(__snake_case ,self.num_steps )
self.assertListEqual(__snake_case ,__snake_case ,msg=F'failed for {scheduler_func} in save and reload' )
class snake_case_ :
def __init__( self :Tuple ,__snake_case :str ) -> Any:
a__ = fn
def __call__( self :List[str] ,*__snake_case :Optional[Any] ,**__snake_case :Optional[int] ) -> Union[str, Any]:
return self.fn(*__snake_case ,**__snake_case )
@classmethod
def lowerCamelCase__( self :Tuple ,__snake_case :Union[str, Any] ) -> Dict:
a__ = list(map(self ,scheduler.lr_lambdas ) )
| 657 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
snake_case : Any = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
snake_case : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
from __future__ import annotations
def __lowercase ( __lowerCAmelCase : list[int] ): # This function is recursive
a__ = len(__lowerCAmelCase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
a__ = array[0]
a__ = False
a__ = 1
a__ = []
while not is_found and i < array_length:
if array[i] < pivot:
a__ = True
a__ = [element for element in array[i:] if element >= array[i]]
a__ = longest_subsequence(__lowerCAmelCase )
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
a__ = temp_array
else:
i += 1
a__ = [element for element in array[1:] if element >= pivot]
a__ = [pivot, *longest_subsequence(__lowerCAmelCase )]
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = 1
@register_to_config
def __init__( self :Optional[int] ,__snake_case :int = 10_00 ,__snake_case :Optional[Union[np.ndarray, List[float]]] = None ) -> int:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__snake_case )
# standard deviation of the initial noise distribution
a__ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
a__ = 4
# running values
a__ = []
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :int ,__snake_case :Union[str, torch.device] = None ) -> Union[str, Any]:
a__ = num_inference_steps
a__ = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
a__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
a__ = torch.tensor(self.config.trained_betas ,dtype=torch.floataa )
else:
a__ = torch.sin(steps * math.pi / 2 ) ** 2
a__ = (1.0 - self.betas**2) ** 0.5
a__ = (torch.atana(self.betas ,self.alphas ) / math.pi * 2)[:-1]
a__ = timesteps.to(__snake_case )
a__ = []
def lowerCamelCase__( self :Any ,__snake_case :torch.FloatTensor ,__snake_case :int ,__snake_case :torch.FloatTensor ,__snake_case :bool = True ,) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
a__ = (self.timesteps == timestep).nonzero().item()
a__ = timestep_index + 1
a__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__snake_case )
if len(self.ets ) == 1:
a__ = self.ets[-1]
elif len(self.ets ) == 2:
a__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
a__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
a__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
a__ = self._get_prev_sample(__snake_case ,__snake_case ,__snake_case ,__snake_case )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :torch.FloatTensor ,*__snake_case :int ,**__snake_case :Optional[int] ) -> torch.FloatTensor:
return sample
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[Any] ,__snake_case :Optional[int] ,__snake_case :Dict ,__snake_case :Any ) -> Optional[Any]:
a__ = self.alphas[timestep_index]
a__ = self.betas[timestep_index]
a__ = self.alphas[prev_timestep_index]
a__ = self.betas[prev_timestep_index]
a__ = (sample - sigma * ets) / max(__snake_case ,1E-8 )
a__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self :Any ) -> Union[str, Any]:
return self.config.num_train_timesteps
| 657 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Dict = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = ['''pixel_values''']
def __init__( self :Optional[Any] ,__snake_case :bool = True ,__snake_case :int = 32 ,__snake_case :Union[str, Any]=PILImageResampling.BILINEAR ,__snake_case :bool = True ,**__snake_case :Tuple ,) -> None:
a__ = do_resize
a__ = do_rescale
a__ = size_divisor
a__ = resample
super().__init__(**__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :np.ndarray ,__snake_case :int ,__snake_case :Tuple ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :List[Any] ) -> np.ndarray:
a__ , a__ = get_image_size(__snake_case )
# Rounds the height and width down to the closest multiple of size_divisor
a__ = height // size_divisor * size_divisor
a__ = width // size_divisor * size_divisor
a__ = resize(__snake_case ,(new_h, new_w) ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
return image
def lowerCamelCase__( self :List[str] ,__snake_case :np.ndarray ,__snake_case :float ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :str ) -> np.ndarray:
return rescale(image=__snake_case ,scale=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :Tuple ,__snake_case :Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] ,__snake_case :Optional[bool] = None ,__snake_case :Optional[int] = None ,__snake_case :Union[str, Any]=None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[Union[TensorType, str]] = None ,__snake_case :ChannelDimension = ChannelDimension.FIRST ,**__snake_case :List[Any] ,) -> BatchFeature:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = size_divisor if size_divisor is not None else self.size_divisor
a__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
a__ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(__snake_case ) for img in images]
if do_resize:
a__ = [self.resize(__snake_case ,size_divisor=__snake_case ,resample=__snake_case ) for image in images]
if do_rescale:
a__ = [self.rescale(__snake_case ,scale=1 / 2_55 ) for image in images]
a__ = [to_channel_dimension_format(__snake_case ,__snake_case ) for image in images]
a__ = {'pixel_values': images}
return BatchFeature(data=__snake_case ,tensor_type=__snake_case )
| 657 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
snake_case : Dict = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
snake_case : Union[str, Any] = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
snake_case : Union[str, Any] = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
snake_case : List[str] = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
snake_case : Union[str, Any] = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : str ):
for tf_name, hf_name in patterns:
a__ = k.replace(__lowerCAmelCase , __lowerCAmelCase )
return k
def __lowercase ( __lowerCAmelCase : dict , __lowerCAmelCase : dict ):
a__ = BigBirdPegasusConfig(**__lowerCAmelCase )
a__ = BigBirdPegasusForConditionalGeneration(__lowerCAmelCase )
a__ = torch_model.state_dict()
a__ = {}
# separating decoder weights
a__ = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
a__ = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
a__ = [k.endswith(__lowerCAmelCase ) for ending in KEYS_TO_IGNORE]
if any(__lowerCAmelCase ):
continue
a__ = DECODER_PATTERNS
a__ = rename_state_dict_key(__lowerCAmelCase , __lowerCAmelCase )
if new_k not in state_dict:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
a__ = v.T
a__ = torch.from_numpy(__lowerCAmelCase )
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
a__ = [k.endswith(__lowerCAmelCase ) for ending in KEYS_TO_IGNORE]
if any(__lowerCAmelCase ):
continue
a__ = REMAINING_PATTERNS
a__ = rename_state_dict_key(__lowerCAmelCase , __lowerCAmelCase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
a__ = v.T
a__ = torch.from_numpy(__lowerCAmelCase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
a__ = mapping['model.embed_positions.weight']
a__ = mapping.pop('model.embed_positions.weight' )
a__ , a__ = torch_model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
a__ = [
k
for k in missing
if k
not in [
'final_logits_bias',
'model.encoder.embed_tokens.weight',
'model.decoder.embed_tokens.weight',
'lm_head.weight',
]
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def __lowercase ( __lowerCAmelCase : Dict ):
a__ = tf.train.list_variables(__lowerCAmelCase )
a__ = {}
a__ = ['global_step']
for name, shape in tqdm(__lowerCAmelCase , desc='converting tf checkpoint to dict' ):
a__ = any(pat in name for pat in ignore_name )
if skip_key:
continue
a__ = tf.train.load_variable(__lowerCAmelCase , __lowerCAmelCase )
a__ = array
return tf_weights
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : dict ):
a__ = get_tf_weights_as_numpy(__lowerCAmelCase )
a__ = convert_bigbird_pegasus(__lowerCAmelCase , __lowerCAmelCase )
torch_model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
snake_case : Tuple = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
snake_case : Dict = parser.parse_args()
snake_case : List[Any] = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 657 |
def __lowercase ( __lowerCAmelCase : int ):
a__ = generate_pascal_triangle(__lowerCAmelCase )
for row_idx in range(__lowerCAmelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = []
for current_row_idx in range(__lowerCAmelCase ):
a__ = populate_current_row(__lowerCAmelCase , __lowerCAmelCase )
triangle.append(__lowerCAmelCase )
return triangle
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : int ):
a__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
a__ , a__ = 1, 1
for current_col_idx in range(1 , __lowerCAmelCase ):
calculate_current_element(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return current_row
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
a__ = triangle[current_row_idx - 1][current_col_idx - 1]
a__ = triangle[current_row_idx - 1][current_col_idx]
a__ = above_to_left_elt + above_to_right_elt
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = [[1]]
for row_index in range(1 , __lowerCAmelCase ):
a__ = [0] + result[-1] + [0]
a__ = row_index + 1
# Calculate the number of distinct elements in a row
a__ = sum(divmod(__lowerCAmelCase , 2 ) )
a__ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
a__ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
a__ = row_first_half + row_second_half
result.append(__lowerCAmelCase )
return result
def __lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__lowerCAmelCase : Callable , __lowerCAmelCase : int ) -> None:
a__ = F'{func.__name__}({value})'
a__ = timeit(F'__main__.{call}' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'{call:38} -- {timing:.4f} seconds' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__lowerCAmelCase , __lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 657 | 1 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
snake_case : int = logging.get_logger(__name__)
snake_case : Dict = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def __lowercase ( __lowerCAmelCase : List[Any] ):
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
a__ = k.replace(__lowerCAmelCase , __lowerCAmelCase )
if k.startswith('encoder' ):
a__ = k.replace('.attn' , '.self_attn' )
a__ = k.replace('norm1' , 'self_attn_layer_norm' )
a__ = k.replace('norm2' , 'final_layer_norm' )
elif k.startswith('decoder' ):
a__ = k.replace('norm1' , 'self_attn_layer_norm' )
a__ = k.replace('norm2' , 'encoder_attn_layer_norm' )
a__ = k.replace('norm3' , 'final_layer_norm' )
return k
def __lowercase ( __lowerCAmelCase : List[str] ):
a__ = [
'model.encoder.layernorm_embedding.weight',
'model.encoder.layernorm_embedding.bias',
'model.decoder.layernorm_embedding.weight',
'model.decoder.layernorm_embedding.bias',
]
for k in keys:
a__ = sd.pop(__lowerCAmelCase )
a__ = k.replace('layernorm_embedding' , 'layer_norm' )
assert new_k not in sd
a__ = v
snake_case : List[Any] = ['''START''']
@torch.no_grad()
def __lowercase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any ):
a__ = torch.load(__lowerCAmelCase , map_location='cpu' )
a__ = model['model']
a__ = BlenderbotConfig.from_json_file(__lowerCAmelCase )
a__ = BlenderbotForConditionalGeneration(__lowerCAmelCase )
a__ = m.model.state_dict().keys()
a__ = []
a__ = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
a__ = rename_state_dict_key(__lowerCAmelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
a__ = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__lowerCAmelCase )
m.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
m.half()
m.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
snake_case : Any = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 657 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
snake_case : str = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
snake_case : Tuple = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
snake_case : str = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
snake_case : Tuple = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
snake_case : int = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def __lowercase ( ):
a__ , a__ = randrange(len(__lowerCAmelCase ) ), randrange(len(__lowerCAmelCase ) )
a__ = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
a__ , a__ = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowercase ( __lowerCAmelCase : int = 1_0_0 ):
return (generate_random_hand() for _ in range(__lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
assert PokerHand(__lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
a__ = PokerHand(__lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
def __lowercase ( ):
a__ = [PokerHand(__lowerCAmelCase ) for hand in SORTED_HANDS]
a__ = poker_hands.copy()
shuffle(__lowerCAmelCase )
a__ = chain(sorted(__lowerCAmelCase ) )
for index, hand in enumerate(__lowerCAmelCase ):
assert hand == poker_hands[index]
def __lowercase ( ):
# Test that five high straights are compared correctly.
a__ = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=__lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowercase ( ):
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
a__ = PokerHand('2C 4S AS 3D 5C' )
a__ = True
a__ = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowercase ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
a__ = 0
a__ = os.path.abspath(os.path.dirname(__lowerCAmelCase ) )
a__ = os.path.join(__lowerCAmelCase , 'poker_hands.txt' )
with open(__lowerCAmelCase ) as file_hand:
for line in file_hand:
a__ = line[:1_4].strip()
a__ = line[1_5:].strip()
a__ , a__ = PokerHand(__lowerCAmelCase ), PokerHand(__lowerCAmelCase )
a__ = player.compare_with(__lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 657 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class snake_case_ (unittest.TestCase ):
@slow
def lowerCamelCase__( self :List[Any] ) -> Union[str, Any]:
a__ = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
a__ = {
'input_ids': tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] ,dtype=tf.intaa ), # "My dog is cute"
'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] ,dtype=tf.intaa ),
}
a__ = model(__snake_case )['last_hidden_state']
a__ = tf.TensorShape((1, 6, 7_68) )
self.assertEqual(output.shape ,__snake_case )
# compare the actual values for a slice.
a__ = tf.convert_to_tensor(
[
[
[0.0_68_17_62, 0.10_89_44_51, 0.06_77_25_04],
[-0.06_42_36_68, 0.02_36_66_15, 0.04_32_93_44],
[-0.06_05_72_95, 0.09_97_41_35, -0.00_07_05_84],
]
] ,dtype=tf.floataa ,)
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
| 657 |
def __lowercase ( __lowerCAmelCase : int ):
if length <= 0 or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(__lowerCAmelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 657 | 1 |
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
snake_case : str = ['''small''', '''medium''', '''large''']
snake_case : Tuple = '''lm_head.decoder.weight'''
snake_case : Dict = '''lm_head.weight'''
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : str ):
a__ = torch.load(__lowerCAmelCase )
a__ = d.pop(__lowerCAmelCase )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
if __name__ == "__main__":
snake_case : Any = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
snake_case : List[Any] = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
snake_case : List[str] = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
snake_case : Dict = f"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 657 |
def __lowercase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ):
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError('The length of profit and weight must be same.' )
if max_weight <= 0:
raise ValueError('max_weight must greater than zero.' )
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
a__ = [p / w for p, w in zip(__lowerCAmelCase , __lowerCAmelCase )]
# Creating a copy of the list and sorting profit/weight in ascending order
a__ = sorted(__lowerCAmelCase )
# declaring useful variables
a__ = len(__lowerCAmelCase )
a__ = 0
a__ = 0
a__ = 0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
a__ = sorted_profit_by_weight[length - i - 1]
a__ = profit_by_weight.index(__lowerCAmelCase )
a__ = -1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
snake_case : Tuple = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
snake_case : Optional[int] = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
snake_case : List[str] = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 657 | 1 |
import unittest
from knapsack import greedy_knapsack as kp
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
a__ = [10, 20, 30, 40, 50, 60]
a__ = [2, 4, 6, 8, 10, 12]
a__ = 1_00
self.assertEqual(kp.calc_profit(__snake_case ,__snake_case ,__snake_case ) ,2_10 )
def lowerCamelCase__( self :str ) -> Optional[int]:
self.assertRaisesRegex(__snake_case ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :Optional[Any] ) -> int:
self.assertRaisesRegex(__snake_case ,'Weight can not be negative.' )
def lowerCamelCase__( self :str ) -> List[str]:
self.assertRaisesRegex(__snake_case ,'Profit can not be negative.' )
def lowerCamelCase__( self :str ) -> Optional[Any]:
self.assertRaisesRegex(__snake_case ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :int ) -> List[Any]:
self.assertRaisesRegex(
__snake_case ,'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 657 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Optional[Any] = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
def __lowercase ( __lowerCAmelCase : int = 2_0_0 ):
a__ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
a__ = [0] * (pence + 1)
a__ = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(__lowerCAmelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
| 657 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class snake_case_ :
def __init__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[Any]=14 ,__snake_case :Dict=7 ,__snake_case :Optional[int]=True ,__snake_case :Optional[int]=True ,__snake_case :Dict=True ,__snake_case :List[Any]=True ,__snake_case :Optional[int]=True ,__snake_case :Any=99 ,__snake_case :List[str]=32 ,__snake_case :List[str]=5 ,__snake_case :Tuple=4 ,__snake_case :Optional[int]=37 ,__snake_case :Optional[int]="gelu" ,__snake_case :Tuple=0.1 ,__snake_case :Tuple=0.1 ,__snake_case :Dict=5_12 ,__snake_case :Union[str, Any]=16 ,__snake_case :str=2 ,__snake_case :Optional[Any]=0.02 ,__snake_case :Dict=3 ,__snake_case :Optional[Any]=4 ,__snake_case :Optional[Any]=None ,) -> Tuple:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_token_type_ids
a__ = use_input_mask
a__ = use_labels
a__ = use_mc_token_ids
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = self.vocab_size - 1
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
if self.use_mc_token_ids:
a__ = ids_tensor([self.batch_size, self.num_choices] ,self.seq_length )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
a__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase__( self :Optional[Any] ) -> Tuple:
return CTRLConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
def lowerCamelCase__( self :str ,__snake_case :List[str] ,__snake_case :Any ,__snake_case :Dict ,__snake_case :int ,__snake_case :Optional[Any] ,*__snake_case :List[str] ) -> List[Any]:
a__ = CTRLModel(config=__snake_case )
model.to(__snake_case )
model.eval()
model(__snake_case ,token_type_ids=__snake_case ,head_mask=__snake_case )
model(__snake_case ,token_type_ids=__snake_case )
a__ = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) ,config.n_layer )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[str] ,__snake_case :Union[str, Any] ,__snake_case :str ,__snake_case :str ,__snake_case :Dict ,*__snake_case :Dict ) -> Dict:
a__ = CTRLLMHeadModel(__snake_case )
model.to(__snake_case )
model.eval()
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) = config_and_inputs
a__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def lowerCamelCase__( self :Optional[int] ,__snake_case :Tuple ,__snake_case :str ,__snake_case :str ,__snake_case :List[str] ,*__snake_case :Optional[int] ) -> List[Any]:
a__ = self.num_labels
a__ = CTRLForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = model(__snake_case ,token_type_ids=__snake_case ,labels=__snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
@require_torch
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : Any = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Any ,__snake_case :List[str] ,__snake_case :Dict ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCamelCase__( self :int ) -> List[str]:
a__ = CTRLModelTester(self )
a__ = ConfigTester(self ,config_class=__snake_case ,n_embd=37 )
def lowerCamelCase__( self :str ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :str ) -> str:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__snake_case )
def lowerCamelCase__( self :List[Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase__( self :Union[str, Any] ) -> Tuple:
pass
@slow
def lowerCamelCase__( self :int ) -> List[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = CTRLModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def lowerCamelCase__( self :Dict ) -> List[str]:
pass
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Union[str, Any] ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCamelCase__( self :Any ) -> Dict:
a__ = CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(__snake_case )
a__ = torch.tensor(
[[1_18_59, 0, 16_11, 8]] ,dtype=torch.long ,device=__snake_case ) # Legal the president is
a__ = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
a__ = model.generate(__snake_case ,do_sample=__snake_case )
self.assertListEqual(output_ids[0].tolist() ,__snake_case )
| 657 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
snake_case : Optional[int] = None
snake_case : Any = logging.get_logger(__name__)
snake_case : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case : Any = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
snake_case : Any = {
'''facebook/nllb-large-en-ro''': 10_24,
'''facebook/nllb-200-distilled-600M''': 10_24,
}
# fmt: off
snake_case : List[Any] = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : str = VOCAB_FILES_NAMES
UpperCAmelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : str = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ : str = NllbTokenizer
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self :List[str] ,__snake_case :List[str]=None ,__snake_case :Any=None ,__snake_case :Dict="<s>" ,__snake_case :List[Any]="</s>" ,__snake_case :List[Any]="</s>" ,__snake_case :Any="<s>" ,__snake_case :str="<unk>" ,__snake_case :Union[str, Any]="<pad>" ,__snake_case :Dict="<mask>" ,__snake_case :Optional[Any]=None ,__snake_case :int=None ,__snake_case :Any=None ,__snake_case :str=False ,**__snake_case :Optional[int] ,) -> str:
# Mask token behave like a normal word, i.e. include the space before it
a__ = AddedToken(__snake_case ,lstrip=__snake_case ,rstrip=__snake_case ) if isinstance(__snake_case ,__snake_case ) else mask_token
a__ = legacy_behaviour
super().__init__(
vocab_file=__snake_case ,tokenizer_file=__snake_case ,bos_token=__snake_case ,eos_token=__snake_case ,sep_token=__snake_case ,cls_token=__snake_case ,unk_token=__snake_case ,pad_token=__snake_case ,mask_token=__snake_case ,src_lang=__snake_case ,tgt_lang=__snake_case ,additional_special_tokens=__snake_case ,legacy_behaviour=__snake_case ,**__snake_case ,)
a__ = vocab_file
a__ = False if not self.vocab_file else True
a__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
a__ = {
lang_code: self.convert_tokens_to_ids(__snake_case ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
a__ = src_lang if src_lang is not None else 'eng_Latn'
a__ = self.convert_tokens_to_ids(self._src_lang )
a__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCamelCase__( self :List[Any] ) -> str:
return self._src_lang
@src_lang.setter
def lowerCamelCase__( self :Optional[int] ,__snake_case :str ) -> None:
a__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase__( self :int ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ) -> List[int]:
a__ = [self.sep_token_id]
a__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Any ,__snake_case :str ,__snake_case :Optional[str] ,__snake_case :Optional[str] ,**__snake_case :Dict ) -> int:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
a__ = src_lang
a__ = self(__snake_case ,add_special_tokens=__snake_case ,return_tensors=__snake_case ,**__snake_case )
a__ = self.convert_tokens_to_ids(__snake_case )
a__ = tgt_lang_id
return inputs
def lowerCamelCase__( self :List[Any] ,__snake_case :List[str] ,__snake_case :str = "eng_Latn" ,__snake_case :Optional[List[str]] = None ,__snake_case :str = "fra_Latn" ,**__snake_case :Tuple ,) -> BatchEncoding:
a__ = src_lang
a__ = tgt_lang
return super().prepare_seqaseq_batch(__snake_case ,__snake_case ,**__snake_case )
def lowerCamelCase__( self :int ) -> Tuple:
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCamelCase__( self :Dict ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :List[Any] ) -> None:
a__ = self.convert_tokens_to_ids(__snake_case )
if self.legacy_behaviour:
a__ = []
a__ = [self.eos_token_id, self.cur_lang_code]
else:
a__ = [self.cur_lang_code]
a__ = [self.eos_token_id]
a__ = self.convert_ids_to_tokens(self.prefix_tokens )
a__ = self.convert_ids_to_tokens(self.suffix_tokens )
a__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def lowerCamelCase__( self :int ,__snake_case :str ) -> None:
a__ = self.convert_tokens_to_ids(__snake_case )
if self.legacy_behaviour:
a__ = []
a__ = [self.eos_token_id, self.cur_lang_code]
else:
a__ = [self.cur_lang_code]
a__ = [self.eos_token_id]
a__ = self.convert_ids_to_tokens(self.prefix_tokens )
a__ = self.convert_ids_to_tokens(self.suffix_tokens )
a__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str ,pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str ,special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str ,self.prefix_tokens + self.suffix_tokens ) ) ,)
def lowerCamelCase__( self :List[Any] ,__snake_case :str ,__snake_case :Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(__snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory.' )
return
a__ = os.path.join(
__snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file ,__snake_case )
return (out_vocab_file,)
| 657 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class snake_case_ (lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = 1
@register_to_config
def __init__( self :Optional[int] ,__snake_case :int = 10_00 ,__snake_case :Optional[Union[np.ndarray, List[float]]] = None ) -> int:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(__snake_case )
# standard deviation of the initial noise distribution
a__ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
a__ = 4
# running values
a__ = []
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :int ,__snake_case :Union[str, torch.device] = None ) -> Union[str, Any]:
a__ = num_inference_steps
a__ = torch.linspace(1 ,0 ,num_inference_steps + 1 )[:-1]
a__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
a__ = torch.tensor(self.config.trained_betas ,dtype=torch.floataa )
else:
a__ = torch.sin(steps * math.pi / 2 ) ** 2
a__ = (1.0 - self.betas**2) ** 0.5
a__ = (torch.atana(self.betas ,self.alphas ) / math.pi * 2)[:-1]
a__ = timesteps.to(__snake_case )
a__ = []
def lowerCamelCase__( self :Any ,__snake_case :torch.FloatTensor ,__snake_case :int ,__snake_case :torch.FloatTensor ,__snake_case :bool = True ,) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
a__ = (self.timesteps == timestep).nonzero().item()
a__ = timestep_index + 1
a__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(__snake_case )
if len(self.ets ) == 1:
a__ = self.ets[-1]
elif len(self.ets ) == 2:
a__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
a__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
a__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
a__ = self._get_prev_sample(__snake_case ,__snake_case ,__snake_case ,__snake_case )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :torch.FloatTensor ,*__snake_case :int ,**__snake_case :Optional[int] ) -> torch.FloatTensor:
return sample
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[Any] ,__snake_case :Optional[int] ,__snake_case :Dict ,__snake_case :Any ) -> Optional[Any]:
a__ = self.alphas[timestep_index]
a__ = self.betas[timestep_index]
a__ = self.alphas[prev_timestep_index]
a__ = self.betas[prev_timestep_index]
a__ = (sample - sigma * ets) / max(__snake_case ,1E-8 )
a__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self :Any ) -> Union[str, Any]:
return self.config.num_train_timesteps
| 657 | 1 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : str = MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase__ : Union[str, Any] = TF_MODEL_FOR_MASKED_LM_MAPPING
def lowerCamelCase__( self :List[str] ) -> str:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def lowerCamelCase__( self :List[str] ) -> str:
a__ = pipeline(task='fill-mask' ,model='sshleifer/tiny-distilroberta-base' ,top_k=2 ,framework='tf' )
a__ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(__snake_case ,decimals=6 ) ,[
{'sequence': 'My name is grouped', 'score': 2.1E-05, 'token': 3_80_15, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1E-05, 'token': 2_55_06, 'token_str': ' accuser'},
] ,)
a__ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(__snake_case ,decimals=6 ) ,[
{
'sequence': 'The largest city in France is grouped',
'score': 2.1E-05,
'token': 3_80_15,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1E-05,
'token': 2_55_06,
'token_str': ' accuser',
},
] ,)
a__ = unmasker('My name is <mask>' ,targets=[' Patrick', ' Clara', ' Teven'] ,top_k=3 )
self.assertEqual(
nested_simplify(__snake_case ,decimals=6 ) ,[
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 1_36_06, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2E-05, 'token': 34_99, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9E-05, 'token': 29_41, 'token_str': ' Te'},
] ,)
@require_torch
def lowerCamelCase__( self :List[Any] ) -> Optional[int]:
a__ = pipeline(task='fill-mask' ,model='sshleifer/tiny-distilroberta-base' ,top_k=2 ,framework='pt' )
a__ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(__snake_case ,decimals=6 ) ,[
{'sequence': 'My name is Maul', 'score': 2.2E-05, 'token': 3_56_76, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2E-05, 'token': 1_64_16, 'token_str': 'ELS'},
] ,)
a__ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(__snake_case ,decimals=6 ) ,[
{
'sequence': 'The largest city in France is Maul',
'score': 2.2E-05,
'token': 3_56_76,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2E-05, 'token': 1_64_16, 'token_str': 'ELS'},
] ,)
a__ = unmasker('My name is <mask>' ,targets=[' Patrick', ' Clara', ' Teven'] ,top_k=3 )
self.assertEqual(
nested_simplify(__snake_case ,decimals=6 ) ,[
{'sequence': 'My name is Patrick', 'score': 2.1E-05, 'token': 34_99, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2E-05, 'token': 29_41, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2E-05, 'token': 1_36_06, 'token_str': ' Clara'},
] ,)
a__ = unmasker('My name is <mask> <mask>' ,top_k=2 )
self.assertEqual(
nested_simplify(__snake_case ,decimals=6 ) ,[
[
{
'score': 2.2E-05,
'token': 3_56_76,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2E-05, 'token': 1_64_16, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2E-05,
'token': 3_56_76,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2E-05, 'token': 1_64_16, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] ,)
@require_torch_gpu
def lowerCamelCase__( self :Any ) -> Optional[int]:
a__ = pipeline('fill-mask' ,model='hf-internal-testing/tiny-random-distilbert' ,device=0 ,framework='pt' )
# convert model to fp16
pipe.model.half()
a__ = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(__snake_case ,__snake_case )
@slow
@require_torch
def lowerCamelCase__( self :List[Any] ) -> int:
a__ = pipeline(task='fill-mask' ,model='distilroberta-base' ,top_k=2 ,framework='pt' )
self.run_large_test(__snake_case )
@slow
@require_tf
def lowerCamelCase__( self :Dict ) -> str:
a__ = pipeline(task='fill-mask' ,model='distilroberta-base' ,top_k=2 ,framework='tf' )
self.run_large_test(__snake_case )
def lowerCamelCase__( self :int ,__snake_case :Optional[int] ) -> Union[str, Any]:
a__ = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(__snake_case ) ,[
{'sequence': 'My name is John', 'score': 0.0_08, 'token': 6_10, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.0_07, 'token': 15_73, 'token_str': ' Chris'},
] ,)
a__ = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(__snake_case ) ,[
{
'sequence': 'The largest city in France is Paris',
'score': 0.2_51,
'token': 22_01,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.2_14,
'token': 1_27_90,
'token_str': ' Lyon',
},
] ,)
a__ = unmasker('My name is <mask>' ,targets=[' Patrick', ' Clara', ' Teven'] ,top_k=3 )
self.assertEqual(
nested_simplify(__snake_case ) ,[
{'sequence': 'My name is Patrick', 'score': 0.0_05, 'token': 34_99, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.0_00, 'token': 1_36_06, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.0_00, 'token': 29_41, 'token_str': ' Te'},
] ,)
@require_torch
def lowerCamelCase__( self :List[str] ) -> int:
a__ = pipeline(task='fill-mask' ,model='sshleifer/tiny-distilroberta-base' ,framework='pt' )
a__ = None
a__ = None
self.run_pipeline_test(__snake_case ,[] )
@require_tf
def lowerCamelCase__( self :Tuple ) -> Optional[int]:
a__ = pipeline(task='fill-mask' ,model='sshleifer/tiny-distilroberta-base' ,framework='tf' )
a__ = None
a__ = None
self.run_pipeline_test(__snake_case ,[] )
def lowerCamelCase__( self :List[str] ,__snake_case :Union[str, Any] ,__snake_case :Optional[Any] ,__snake_case :int ) -> Any:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
a__ = FillMaskPipeline(model=__snake_case ,tokenizer=__snake_case )
a__ = [
F'This is another {tokenizer.mask_token} test',
]
return fill_masker, examples
def lowerCamelCase__( self :Dict ,__snake_case :Tuple ,__snake_case :List[Any] ) -> Any:
a__ = fill_masker.tokenizer
a__ = fill_masker.model
a__ = fill_masker(
F'This is a {tokenizer.mask_token}' ,)
self.assertEqual(
__snake_case ,[
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
] ,)
a__ = fill_masker([F'This is a {tokenizer.mask_token}'] )
self.assertEqual(
__snake_case ,[
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
] ,)
a__ = fill_masker([F'This is a {tokenizer.mask_token}', F'Another {tokenizer.mask_token} great test.'] )
self.assertEqual(
__snake_case ,[
[
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
],
[
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
],
] ,)
with self.assertRaises(__snake_case ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(__snake_case ):
fill_masker('This is' )
self.run_test_top_k(__snake_case ,__snake_case )
self.run_test_targets(__snake_case ,__snake_case )
self.run_test_top_k_targets(__snake_case ,__snake_case )
self.fill_mask_with_duplicate_targets_and_top_k(__snake_case ,__snake_case )
self.fill_mask_with_multiple_masks(__snake_case ,__snake_case )
def lowerCamelCase__( self :Optional[Any] ,__snake_case :str ,__snake_case :Optional[int] ) -> str:
a__ = tokenizer.get_vocab()
a__ = sorted(vocab.keys() )[:2]
# Pipeline argument
a__ = FillMaskPipeline(model=__snake_case ,tokenizer=__snake_case ,targets=__snake_case )
a__ = fill_masker(F'This is a {tokenizer.mask_token}' )
self.assertEqual(
__snake_case ,[
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
] ,)
a__ = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} ,__snake_case )
a__ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} ,set(__snake_case ) )
# Call argument
a__ = FillMaskPipeline(model=__snake_case ,tokenizer=__snake_case )
a__ = fill_masker(F'This is a {tokenizer.mask_token}' ,targets=__snake_case )
self.assertEqual(
__snake_case ,[
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
] ,)
a__ = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} ,__snake_case )
a__ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} ,set(__snake_case ) )
# Score equivalence
a__ = fill_masker(F'This is a {tokenizer.mask_token}' ,targets=__snake_case )
a__ = [top_mask['token_str'] for top_mask in outputs]
a__ = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__snake_case ) == set(__snake_case ):
a__ = fill_masker(F'This is a {tokenizer.mask_token}' ,targets=__snake_case )
a__ = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__snake_case ) ,nested_simplify(__snake_case ) )
# Raises with invalid
with self.assertRaises(__snake_case ):
a__ = fill_masker(F'This is a {tokenizer.mask_token}' ,targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__snake_case ):
a__ = fill_masker(F'This is a {tokenizer.mask_token}' ,targets=[''] )
with self.assertRaises(__snake_case ):
a__ = fill_masker(F'This is a {tokenizer.mask_token}' ,targets='' )
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :Optional[int] ) -> List[Any]:
a__ = FillMaskPipeline(model=__snake_case ,tokenizer=__snake_case ,top_k=2 )
a__ = fill_masker(F'This is a {tokenizer.mask_token}' )
self.assertEqual(
__snake_case ,[
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
] ,)
a__ = FillMaskPipeline(model=__snake_case ,tokenizer=__snake_case )
a__ = fill_masker(F'This is a {tokenizer.mask_token}' ,top_k=2 )
self.assertEqual(
__snake_case ,[
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
] ,)
self.assertEqual(nested_simplify(__snake_case ) ,nested_simplify(__snake_case ) )
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Tuple ,__snake_case :str ) -> Tuple:
a__ = tokenizer.get_vocab()
a__ = FillMaskPipeline(model=__snake_case ,tokenizer=__snake_case )
# top_k=2, ntargets=3
a__ = sorted(vocab.keys() )[:3]
a__ = fill_masker(F'This is a {tokenizer.mask_token}' ,top_k=2 ,targets=__snake_case )
# If we use the most probably targets, and filter differently, we should still
# have the same results
a__ = [el['token_str'] for el in sorted(__snake_case ,key=lambda __snake_case : x["score"] ,reverse=__snake_case )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__snake_case ).issubset(__snake_case ):
a__ = fill_masker(F'This is a {tokenizer.mask_token}' ,top_k=3 ,targets=__snake_case )
# They should yield exactly the same result
self.assertEqual(nested_simplify(__snake_case ) ,nested_simplify(__snake_case ) )
def lowerCamelCase__( self :List[str] ,__snake_case :int ,__snake_case :int ) -> Tuple:
a__ = FillMaskPipeline(model=__snake_case ,tokenizer=__snake_case )
a__ = tokenizer.get_vocab()
# String duplicates + id duplicates
a__ = sorted(vocab.keys() )[:3]
a__ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
a__ = fill_masker(F'My name is {tokenizer.mask_token}' ,targets=__snake_case ,top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(__snake_case ) ,3 )
def lowerCamelCase__( self :Tuple ,__snake_case :Optional[int] ,__snake_case :Optional[int] ) -> Union[str, Any]:
a__ = FillMaskPipeline(model=__snake_case ,tokenizer=__snake_case )
a__ = fill_masker(
F'This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}' ,top_k=2 )
self.assertEqual(
__snake_case ,[
[
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
],
[
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
],
[
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
{'sequence': ANY(__snake_case ), 'score': ANY(__snake_case ), 'token': ANY(__snake_case ), 'token_str': ANY(__snake_case )},
],
] ,)
| 657 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case : Any = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Union[str, Any] = ['''MobileViTFeatureExtractor''']
snake_case : int = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Dict = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Tuple = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
snake_case : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 | 1 |
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] ):
a__ = OmegaConf.load(__lowerCAmelCase )
a__ = torch.load(__lowerCAmelCase , map_location='cpu' )['model']
a__ = list(state_dict.keys() )
# extract state_dict for VQVAE
a__ = {}
a__ = 'first_stage_model.'
for key in keys:
if key.startswith(__lowerCAmelCase ):
a__ = state_dict[key]
# extract state_dict for UNetLDM
a__ = {}
a__ = 'model.diffusion_model.'
for key in keys:
if key.startswith(__lowerCAmelCase ):
a__ = state_dict[key]
a__ = config.model.params.first_stage_config.params
a__ = config.model.params.unet_config.params
a__ = VQModel(**__lowerCAmelCase ).eval()
vqvae.load_state_dict(__lowerCAmelCase )
a__ = UNetLDMModel(**__lowerCAmelCase ).eval()
unet.load_state_dict(__lowerCAmelCase )
a__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__lowerCAmelCase , )
a__ = LDMPipeline(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
pipeline.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
snake_case : int = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
snake_case : Optional[int] = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 657 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
snake_case : Dict = logging.get_logger(__name__)
snake_case : Any = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
if tokenizer_name is None:
a__ = TOKENIZER_CLASSES
else:
a__ = {tokenizer_name: getattr(__lowerCAmelCase , tokenizer_name + 'Fast' )}
logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
for tokenizer_name in tokenizer_names:
a__ = TOKENIZER_CLASSES[tokenizer_name]
a__ = True
if checkpoint_name is None:
a__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
a__ = [checkpoint_name]
logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
for checkpoint in checkpoint_names:
logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
# Load tokenizer
a__ = tokenizer_class.from_pretrained(__lowerCAmelCase , force_download=__lowerCAmelCase )
# Save fast tokenizer
logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
# For organization names we create sub-directories
if "/" in checkpoint:
a__ , a__ = checkpoint.split('/' )
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
elif add_prefix:
a__ = checkpoint
a__ = dump_path
else:
a__ = None
a__ = dump_path
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
a__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
a__ = file_path.split(__lowerCAmelCase )[-1][0]
if next_char == "/":
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
a__ = None
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
a__ = tokenizer.save_pretrained(
__lowerCAmelCase , legacy_format=__lowerCAmelCase , filename_prefix=__lowerCAmelCase )
logger.info(F'=> File names {file_names}' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(__lowerCAmelCase )
logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
snake_case : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 657 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
snake_case : Optional[Any] = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : int = ['''input_features''', '''is_longer''']
def __init__( self :Union[str, Any] ,__snake_case :Optional[Any]=64 ,__snake_case :Dict=4_80_00 ,__snake_case :Any=4_80 ,__snake_case :Union[str, Any]=10 ,__snake_case :List[Any]=10_24 ,__snake_case :Dict=0.0 ,__snake_case :Any=False ,__snake_case :float = 0 ,__snake_case :float = 1_40_00 ,__snake_case :int = None ,__snake_case :str = "fusion" ,__snake_case :str = "repeatpad" ,**__snake_case :Optional[int] ,) -> Optional[int]:
super().__init__(
feature_size=__snake_case ,sampling_rate=__snake_case ,padding_value=__snake_case ,return_attention_mask=__snake_case ,**__snake_case ,)
a__ = top_db
a__ = truncation
a__ = padding
a__ = fft_window_size
a__ = (fft_window_size >> 1) + 1
a__ = hop_length
a__ = max_length_s
a__ = max_length_s * sampling_rate
a__ = sampling_rate
a__ = frequency_min
a__ = frequency_max
a__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=__snake_case ,min_frequency=__snake_case ,max_frequency=__snake_case ,sampling_rate=__snake_case ,norm=__snake_case ,mel_scale='htk' ,)
a__ = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=__snake_case ,min_frequency=__snake_case ,max_frequency=__snake_case ,sampling_rate=__snake_case ,norm='slaney' ,mel_scale='slaney' ,)
def lowerCamelCase__( self :str ) -> Dict[str, Any]:
a__ = copy.deepcopy(self.__dict__ )
a__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowerCamelCase__( self :int ,__snake_case :np.array ,__snake_case :Optional[np.array] = None ) -> np.ndarray:
a__ = spectrogram(
__snake_case ,window_function(self.fft_window_size ,'hann' ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=__snake_case ,log_mel='dB' ,)
return log_mel_spectrogram.T
def lowerCamelCase__( self :List[str] ,__snake_case :Union[str, Any] ,__snake_case :Union[str, Any] ,__snake_case :Dict ) -> Optional[Any]:
a__ = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
a__ = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
a__ = [0]
# randomly choose index for each part
a__ = np.random.choice(ranges[0] )
a__ = np.random.choice(ranges[1] )
a__ = np.random.choice(ranges[2] )
a__ = mel[idx_front : idx_front + chunk_frames, :]
a__ = mel[idx_middle : idx_middle + chunk_frames, :]
a__ = mel[idx_back : idx_back + chunk_frames, :]
a__ = torch.tensor(mel[None, None, :] )
a__ = torch.nn.functional.interpolate(
__snake_case ,size=[chunk_frames, 64] ,mode='bilinear' ,align_corners=__snake_case )
a__ = mel_shrink[0][0].numpy()
a__ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def lowerCamelCase__( self :Tuple ,__snake_case :np.array ,__snake_case :Tuple ,__snake_case :Optional[Any] ,__snake_case :Dict ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
a__ = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
a__ = len(__snake_case ) - max_length
a__ = np.random.randint(0 ,overflow + 1 )
a__ = waveform[idx : idx + max_length]
a__ = self._np_extract_fbank_features(__snake_case ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
a__ = self._np_extract_fbank_features(__snake_case ,self.mel_filters )
a__ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
a__ = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
a__ = np.stack([mel, mel, mel, mel] ,axis=0 )
a__ = False
else:
a__ = self._random_mel_fusion(__snake_case ,__snake_case ,__snake_case )
a__ = True
else:
raise NotImplementedError(F'data_truncating {truncation} not implemented' )
else:
a__ = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
a__ = int(max_length / len(__snake_case ) )
a__ = np.stack(np.tile(__snake_case ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
a__ = int(max_length / len(__snake_case ) )
a__ = np.stack(np.tile(__snake_case ,__snake_case ) )
a__ = np.pad(__snake_case ,(0, max_length - waveform.shape[0]) ,mode='constant' ,constant_values=0 )
if truncation == "fusion":
a__ = self._np_extract_fbank_features(__snake_case ,self.mel_filters )
a__ = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
a__ = self._np_extract_fbank_features(__snake_case ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self :Optional[Any] ,__snake_case :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,__snake_case :str = None ,__snake_case :Optional[str] = None ,__snake_case :Optional[int] = None ,__snake_case :Optional[int] = None ,__snake_case :Optional[Union[str, TensorType]] = None ,**__snake_case :Optional[Any] ,) -> BatchFeature:
a__ = truncation if truncation is not None else self.truncation
a__ = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
a__ = isinstance(__snake_case ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
a__ = is_batched_numpy or (
isinstance(__snake_case ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
a__ = [np.asarray(__snake_case ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__snake_case ,np.ndarray ):
a__ = np.asarray(__snake_case ,dtype=np.floataa )
elif isinstance(__snake_case ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
a__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
a__ = [np.asarray(__snake_case )]
# convert to mel spectrogram, truncate and pad if needed.
a__ = [
self._get_input_mel(__snake_case ,max_length if max_length else self.nb_max_samples ,__snake_case ,__snake_case )
for waveform in raw_speech
]
a__ = []
a__ = []
for mel, longer in padded_inputs:
input_mel.append(__snake_case )
is_longer.append(__snake_case )
if truncation == "fusion" and sum(__snake_case ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
a__ = np.random.randint(0 ,len(__snake_case ) )
a__ = True
if isinstance(input_mel[0] ,__snake_case ):
a__ = [np.asarray(__snake_case ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
a__ = [[longer] for longer in is_longer]
a__ = {'input_features': input_mel, 'is_longer': is_longer}
a__ = BatchFeature(__snake_case )
if return_tensors is not None:
a__ = input_features.convert_to_tensors(__snake_case )
return input_features
| 657 |
from math import ceil, sqrt
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_0_0_0 ):
a__ = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
a__ = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
a__ = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 1 |
from manim import *
class snake_case_ (lowerCamelCase_ ):
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
a__ = Rectangle(height=0.5 ,width=0.5 )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
a__ = Rectangle(height=0.25 ,width=0.25 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('CPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('GPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Model' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
a__ = []
a__ = []
for i, rect in enumerate(__snake_case ):
a__ = fill.copy().set_fill(__snake_case ,opacity=0.8 )
target.move_to(__snake_case )
model_arr.append(__snake_case )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__snake_case ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case ,*__snake_case )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Disk' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
disk.move_to([-4, -1.25, 0] )
self.add(__snake_case ,__snake_case )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case ,__snake_case )
a__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(__snake_case ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__snake_case )
a__ = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) )
a__ = Square(0.3 )
input.set_fill(__snake_case ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__snake_case ,buff=0.5 )
self.play(Write(__snake_case ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__snake_case ,buff=0.02 )
self.play(MoveToTarget(__snake_case ) )
self.play(FadeOut(__snake_case ) )
a__ = Arrow(start=__snake_case ,end=__snake_case ,color=__snake_case ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__snake_case ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a__ = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) )
a__ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__snake_case ) ,Circumscribe(model_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_cpu_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
a__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__snake_case ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
a__ = AnimationGroup(
FadeOut(__snake_case ,run_time=0.5 ) ,MoveToTarget(__snake_case ,run_time=0.5 ) ,FadeIn(__snake_case ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__snake_case )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a__ = 0.7
self.play(
Circumscribe(model_arr[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_arr[i + 1] ,color=__snake_case ,**__snake_case ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(cpu_left_col_base[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
a__ = a_c
a__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__snake_case ) ,FadeOut(__snake_case ,run_time=0.5 ) ,)
a__ = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) ,MoveToTarget(__snake_case ) )
self.wait()
| 657 |
from sklearn.metrics import fa_score
import datasets
snake_case : Optional[int] = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
snake_case : List[Any] = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
snake_case : Union[str, Any] = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
def lowerCamelCase__( self :Any ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,)
def lowerCamelCase__( self :Dict ,__snake_case :str ,__snake_case :str ,__snake_case :Dict=None ,__snake_case :str=1 ,__snake_case :Optional[int]="binary" ,__snake_case :Union[str, Any]=None ) -> Tuple:
a__ = fa_score(
__snake_case ,__snake_case ,labels=__snake_case ,pos_label=__snake_case ,average=__snake_case ,sample_weight=__snake_case )
return {"f1": float(__snake_case ) if score.size == 1 else score}
| 657 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
torch.manual_seed(0 )
a__ = UNetaDModel(
sample_size=(32, 64) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(1_28, 1_28) ,down_block_types=('AttnDownBlock2D', 'DownBlock2D') ,up_block_types=('UpBlock2D', 'AttnUpBlock2D') ,)
return model
@property
def lowerCamelCase__( self :str ) -> Tuple:
torch.manual_seed(0 )
a__ = UNetaDConditionModel(
sample_size=(64, 32) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(1_28, 1_28) ,down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') ,up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') ,cross_attention_dim=10 ,)
return model
@property
def lowerCamelCase__( self :Optional[int] ) -> List[str]:
torch.manual_seed(0 )
a__ = AutoencoderKL(
sample_size=(1_28, 64) ,in_channels=1 ,out_channels=1 ,latent_channels=1 ,layers_per_block=2 ,block_out_channels=(1_28, 1_28) ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,)
a__ = UNetaDModel(
sample_size=(64, 32) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(1_28, 1_28) ,down_block_types=('AttnDownBlock2D', 'DownBlock2D') ,up_block_types=('UpBlock2D', 'AttnUpBlock2D') ,)
return vqvae, unet
@slow
def lowerCamelCase__( self :Optional[Any] ) -> Optional[Any]:
a__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
a__ = Mel(
x_res=self.dummy_unet.config.sample_size[1] ,y_res=self.dummy_unet.config.sample_size[0] ,)
a__ = DDPMScheduler()
a__ = AudioDiffusionPipeline(vqvae=__snake_case ,unet=self.dummy_unet ,mel=__snake_case ,scheduler=__snake_case )
a__ = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
a__ = torch.Generator(device=__snake_case ).manual_seed(42 )
a__ = pipe(generator=__snake_case ,steps=4 )
a__ = output.audios[0]
a__ = output.images[0]
a__ = torch.Generator(device=__snake_case ).manual_seed(42 )
a__ = pipe(generator=__snake_case ,steps=4 ,return_dict=__snake_case )
a__ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
a__ = np.frombuffer(image.tobytes() ,dtype='uint8' )[:10]
a__ = np.frombuffer(image_from_tuple.tobytes() ,dtype='uint8' )[:10]
a__ = np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
a__ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] ,y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] ,)
a__ = DDIMScheduler()
a__ = self.dummy_vqvae_and_unet
a__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] ,unet=dummy_vqvae_and_unet[1] ,mel=__snake_case ,scheduler=__snake_case )
a__ = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
np.random.seed(0 )
a__ = np.random.uniform(-1 ,1 ,((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
a__ = torch.Generator(device=__snake_case ).manual_seed(42 )
a__ = pipe(raw_audio=__snake_case ,generator=__snake_case ,start_step=5 ,steps=10 )
a__ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
a__ = np.frombuffer(image.tobytes() ,dtype='uint8' )[:10]
a__ = np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
a__ = self.dummy_unet_condition
a__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] ,unet=__snake_case ,mel=__snake_case ,scheduler=__snake_case )
a__ = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
np.random.seed(0 )
a__ = torch.rand((1, 1, 10) )
a__ = pipe(generator=__snake_case ,encoding=__snake_case )
a__ = output.images[0]
a__ = np.frombuffer(image.tobytes() ,dtype='uint8' )[:10]
a__ = np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Dict ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__( self :List[Any] ) -> List[str]:
a__ = torch_device
a__ = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' )
a__ = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
a__ = torch.Generator(device=__snake_case ).manual_seed(42 )
a__ = pipe(generator=__snake_case )
a__ = output.audios[0]
a__ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
a__ = np.frombuffer(image.tobytes() ,dtype='uint8' )[:10]
a__ = np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 657 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
snake_case : Any = logging.get_logger(__name__)
snake_case : Tuple = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class snake_case_ (lowerCamelCase_ ):
def __init__( self :str ,__snake_case :Dict=None ,__snake_case :int=None ,*__snake_case :str ,**__snake_case :Union[str, Any] ) -> Tuple:
super().__init__(*__snake_case ,**__snake_case )
if config is None:
assert isinstance(self.model ,__snake_case ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F' {self.model.__class__}'
)
a__ = self.model.config
else:
a__ = config
a__ = data_args
a__ = self.config.tgt_vocab_size if isinstance(self.config ,__snake_case ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
' padding..' )
if self.args.label_smoothing == 0:
a__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
a__ = label_smoothed_nll_loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :int ) -> Tuple:
if self.optimizer is None:
a__ = ['bias', 'LayerNorm.weight']
a__ = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
a__ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
a__ = Adafactor
a__ = {'scale_parameter': False, 'relative_step': False}
else:
a__ = AdamW
a__ = {
'betas': (self.args.adam_betaa, self.args.adam_betaa),
'eps': self.args.adam_epsilon,
}
a__ = self.args.learning_rate
if self.sharded_ddp:
a__ = OSS(
params=__snake_case ,optim=__snake_case ,**__snake_case ,)
else:
a__ = optimizer_cls(__snake_case ,**__snake_case )
if self.lr_scheduler is None:
a__ = self._get_lr_scheduler(__snake_case )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def lowerCamelCase__( self :Dict ,__snake_case :List[str] ) -> Union[str, Any]:
a__ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
a__ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
a__ = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
a__ = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=__snake_case )
return scheduler
def lowerCamelCase__( self :Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowerCamelCase__( self :str ,__snake_case :Optional[int] ,__snake_case :List[Any] ,__snake_case :Any ) -> Optional[Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute usual loss via models
a__ , a__ = model(**__snake_case ,labels=__snake_case ,use_cache=__snake_case )[:2]
else:
# compute label smoothed loss
a__ = model(**__snake_case ,use_cache=__snake_case )[0]
a__ = torch.nn.functional.log_softmax(__snake_case ,dim=-1 )
a__ , a__ = self.loss_fn(__snake_case ,__snake_case ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def lowerCamelCase__( self :List[Any] ,__snake_case :Dict ,__snake_case :Optional[int] ) -> Any:
a__ = inputs.pop('labels' )
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
return loss
def lowerCamelCase__( self :Optional[Any] ,__snake_case :nn.Module ,__snake_case :Dict[str, Union[torch.Tensor, Any]] ,__snake_case :bool ,__snake_case :Optional[List[str]] = None ,) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
a__ = self._prepare_inputs(__snake_case )
a__ = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
a__ = self.model.generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**__snake_case ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
a__ = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
a__ , a__ = self._compute_loss(__snake_case ,__snake_case ,__snake_case )
a__ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
a__ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
a__ = self._pad_tensors_to_max_len(__snake_case ,gen_kwargs['max_length'] )
return (loss, logits, labels)
def lowerCamelCase__( self :List[str] ,__snake_case :Optional[Any] ,__snake_case :Union[str, Any] ) -> int:
# If PAD token is not defined at least EOS token has to be defined
a__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F' padded to `max_length`={max_length}' )
a__ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
a__ = tensor
return padded_tensor
| 657 | 1 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
snake_case : int = logging.get_logger(__name__)
def __lowercase ( __lowerCAmelCase : bool , __lowerCAmelCase : bool ):
def run_func(__lowerCAmelCase : Any ):
@wraps(__lowerCAmelCase )
def run_in_eager_mode(*__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : List[Any] ):
return func(*__lowerCAmelCase , **__lowerCAmelCase )
@wraps(__lowerCAmelCase )
@tf.function(experimental_compile=__lowerCAmelCase )
def run_in_graph_mode(*__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : int ):
return func(*__lowerCAmelCase , **__lowerCAmelCase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int ):
a__ = random.Random()
a__ = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(__lowerCAmelCase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : TensorFlowBenchmarkArguments
UpperCAmelCase__ : PretrainedConfig
UpperCAmelCase__ : str = "TensorFlow"
@property
def lowerCamelCase__( self :Any ) -> Tuple:
return tf.__version__
def lowerCamelCase__( self :str ,__snake_case :str ,__snake_case :int ,__snake_case :int ) -> float:
# initialize GPU on separate process
a__ = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
a__ = self._prepare_inference_func(__snake_case ,__snake_case ,__snake_case )
return self._measure_speed(_inference )
def lowerCamelCase__( self :Optional[Any] ,__snake_case :str ,__snake_case :int ,__snake_case :int ) -> float:
a__ = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
a__ = self._prepare_train_func(__snake_case ,__snake_case ,__snake_case )
return self._measure_speed(_train )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :str ,__snake_case :int ,__snake_case :int ) -> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] ,__snake_case )
a__ = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
a__ = self._prepare_inference_func(__snake_case ,__snake_case ,__snake_case )
return self._measure_memory(_inference )
def lowerCamelCase__( self :Tuple ,__snake_case :str ,__snake_case :int ,__snake_case :int ) -> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] ,__snake_case )
a__ = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
a__ = self._prepare_train_func(__snake_case ,__snake_case ,__snake_case )
return self._measure_memory(_train )
def lowerCamelCase__( self :int ,__snake_case :str ,__snake_case :int ,__snake_case :int ) -> Callable[[], None]:
a__ = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
a__ = (
hasattr(__snake_case ,'architectures' )
and isinstance(config.architectures ,__snake_case )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
a__ = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
a__ = __import__('transformers' ,fromlist=[model_class] )
a__ = getattr(__snake_case ,__snake_case )
a__ = model_cls(__snake_case )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
a__ = TF_MODEL_MAPPING[config.__class__](__snake_case )
# encoder-decoder has vocab size saved differently
a__ = config.vocab_size if hasattr(__snake_case ,'vocab_size' ) else config.encoder.vocab_size
a__ = random_input_ids(__snake_case ,__snake_case ,__snake_case )
@run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla )
def encoder_decoder_forward():
return model(__snake_case ,decoder_input_ids=__snake_case ,training=__snake_case )
@run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla )
def encoder_forward():
return model(__snake_case ,training=__snake_case )
a__ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCamelCase__( self :Optional[int] ,__snake_case :str ,__snake_case :int ,__snake_case :int ) -> Callable[[], None]:
a__ = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
a__ = (
hasattr(__snake_case ,'architectures' )
and isinstance(config.architectures ,__snake_case )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
a__ = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
a__ = __import__('transformers' ,fromlist=[model_class] )
a__ = getattr(__snake_case ,__snake_case )
a__ = model_cls(__snake_case )
except ImportError:
raise ImportError(
F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
a__ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__snake_case )
# encoder-decoder has vocab size saved differently
a__ = config.vocab_size if hasattr(__snake_case ,'vocab_size' ) else config.encoder.vocab_size
a__ = random_input_ids(__snake_case ,__snake_case ,__snake_case )
@run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla )
def encoder_decoder_train():
a__ = model(__snake_case ,decoder_input_ids=__snake_case ,labels=__snake_case ,training=__snake_case )[0]
a__ = tf.gradients(__snake_case ,model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode ,self.args.use_xla )
def encoder_train():
a__ = model(__snake_case ,labels=__snake_case ,training=__snake_case )[0]
a__ = tf.gradients(__snake_case ,model.trainable_variables )
return gradients
a__ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ) -> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
timeit.repeat(__snake_case ,repeat=1 ,number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
a__ = timeit.repeat(
__snake_case ,repeat=self.args.repeat ,number=10 ,)
return min(__snake_case ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
def lowerCamelCase__( self :str ,__snake_case :Callable[[], None] ) -> [Memory, MemorySummary]:
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.' )
a__ = start_memory_tracing('transformers' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.' )
a__ = 'N/A'
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.' )
# init nvml
nvml.nvmlInit()
func()
a__ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
a__ = nvml.nvmlDeviceGetMemoryInfo(__snake_case )
a__ = meminfo.used
a__ = Memory(__snake_case )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.' )
a__ = None
else:
a__ = measure_peak_memory_cpu(__snake_case )
a__ = Memory(__snake_case ) if isinstance(__snake_case ,__snake_case ) else memory_bytes
if self.args.trace_memory_line_by_line:
a__ = stop_memory_tracing(__snake_case )
if memory is None:
a__ = summary.total
else:
a__ = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'Doesn\'t fit on GPU. {e}' )
return "N/A", None
| 657 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
snake_case : Dict = '''
Human: <<task>>
Assistant: '''
snake_case : Optional[int] = '''huggingface-tools/default-prompts'''
snake_case : Tuple = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any="run" ):
if prompt_or_repo_id is None:
a__ = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , __lowerCAmelCase ) is not None:
return prompt_or_repo_id
a__ = cached_file(
__lowerCAmelCase , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(__lowerCAmelCase , 'r' , encoding='utf-8' ) as f:
return f.read()
| 657 | 1 |
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class snake_case_ (lowerCamelCase_ ):
def __init__( self :List[str] ,__snake_case :Tuple ,__snake_case :List[Any] ) -> Union[str, Any]:
super().__init__()
self.register_modules(unet=__snake_case ,scheduler=__snake_case )
@torch.no_grad()
def __call__( self :str ,__snake_case :int = 1 ,__snake_case :Optional[torch.Generator] = None ,__snake_case :int = 50 ,__snake_case :Optional[str] = "pil" ,__snake_case :bool = True ,**__snake_case :List[str] ,) -> Union[ImagePipelineOutput, Tuple]:
a__ = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) ,generator=__snake_case ,)
a__ = image.to(self.device )
# set step values
self.scheduler.set_timesteps(__snake_case )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
a__ = self.unet(__snake_case ,__snake_case ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
a__ = self.scheduler.step(__snake_case ,__snake_case ,__snake_case ).prev_sample
a__ = (image / 2 + 0.5).clamp(0 ,1 )
a__ = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
a__ = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=__snake_case ), "This is a local test"
| 657 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
a__ = precision
a__ = ceil(precision / 1_4 )
a__ = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
a__ = 1
a__ = 1_3_5_9_1_4_0_9
a__ = Decimal(__lowerCAmelCase )
for k in range(1 , __lowerCAmelCase ):
a__ = factorial(6 * k ) // (factorial(3 * k ) * factorial(__lowerCAmelCase ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
snake_case : Tuple = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 657 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
snake_case : Optional[Any] = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : str = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[int] = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
snake_case : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 657 |
def __lowercase ( __lowerCAmelCase : int = 2_0_0 ):
a__ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
a__ = [0] * (pence + 1)
a__ = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(__lowerCAmelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
| 657 | 1 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
snake_case : Union[str, Any] = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def __lowercase ( __lowerCAmelCase : Optional[Any] ):
a__ = ['layers', 'blocks']
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
snake_case : Tuple = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def __lowercase ( __lowerCAmelCase : Dict ):
a__ = list(s_dict.keys() )
for key in keys:
a__ = key
for k, v in WHISPER_MAPPING.items():
if k in key:
a__ = new_key.replace(__lowerCAmelCase , __lowerCAmelCase )
print(F'{key} -> {new_key}' )
a__ = s_dict.pop(__lowerCAmelCase )
return s_dict
def __lowercase ( __lowerCAmelCase : Tuple ):
a__ , a__ = emb.weight.shape
a__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
a__ = emb.weight.data
return lin_layer
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : str ):
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
a__ = os.path.basename(__lowerCAmelCase )
a__ = url.split('/' )[-2]
a__ = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
if os.path.exists(__lowerCAmelCase ) and not os.path.isfile(__lowerCAmelCase ):
raise RuntimeError(F'{download_target} exists and is not a regular file' )
if os.path.isfile(__lowerCAmelCase ):
a__ = open(__lowerCAmelCase , 'rb' ).read()
if hashlib.shaaaa(__lowerCAmelCase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file' )
with urllib.request.urlopen(__lowerCAmelCase ) as source, open(__lowerCAmelCase , 'wb' ) as output:
with tqdm(
total=int(source.info().get('Content-Length' ) ) , ncols=8_0 , unit='iB' , unit_scale=__lowerCAmelCase , unit_divisor=1_0_2_4 ) as loop:
while True:
a__ = source.read(8_1_9_2 )
if not buffer:
break
output.write(__lowerCAmelCase )
loop.update(len(__lowerCAmelCase ) )
a__ = open(__lowerCAmelCase , 'rb' ).read()
if hashlib.shaaaa(__lowerCAmelCase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
'Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.' )
return model_bytes
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any ):
if ".pt" not in checkpoint_path:
a__ = _download(_MODELS[checkpoint_path] )
else:
a__ = torch.load(__lowerCAmelCase , map_location='cpu' )
a__ = original_checkpoint['dims']
a__ = original_checkpoint['model_state_dict']
a__ = state_dict['decoder.token_embedding.weight']
remove_ignore_keys_(__lowerCAmelCase )
rename_keys(__lowerCAmelCase )
a__ = True
a__ = state_dict['decoder.layers.0.fc1.weight'].shape[0]
a__ = WhisperConfig(
vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=__lowerCAmelCase , decoder_ffn_dim=__lowerCAmelCase , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_state'] , max_source_positions=dimensions['n_audio_ctx'] , )
a__ = WhisperForConditionalGeneration(__lowerCAmelCase )
a__ , a__ = model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0 and not set(__lowerCAmelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
F' but all the following weights are missing {missing}' )
if tie_embeds:
a__ = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
a__ = proj_out_weights
model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
snake_case : Optional[int] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
snake_case : Optional[Any] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 657 |
from manim import *
class snake_case_ (lowerCamelCase_ ):
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
a__ = Rectangle(height=0.5 ,width=0.5 )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
a__ = Rectangle(height=0.25 ,width=0.25 )
a__ = [mem.copy() for i in range(6 )]
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('CPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(4 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('GPU' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
a__ = [mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Model' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
a__ = []
a__ = []
for i, rect in enumerate(__snake_case ):
a__ = fill.copy().set_fill(__snake_case ,opacity=0.8 )
target.move_to(__snake_case )
model_arr.append(__snake_case )
a__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__snake_case ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case ,*__snake_case )
a__ = [meta_mem.copy() for i in range(6 )]
a__ = [meta_mem.copy() for i in range(6 )]
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(*__snake_case ).arrange(__snake_case ,buff=0 )
a__ = VGroup(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0 )
a__ = Text('Disk' ,font_size=24 )
a__ = Group(__snake_case ,__snake_case ).arrange(__snake_case ,buff=0.5 ,aligned_edge=__snake_case )
disk.move_to([-4, -1.25, 0] )
self.add(__snake_case ,__snake_case )
a__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case ,__snake_case )
a__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(__snake_case ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(__snake_case )
a__ = MarkupText(
F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) )
a__ = Square(0.3 )
input.set_fill(__snake_case ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,__snake_case ,buff=0.5 )
self.play(Write(__snake_case ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=__snake_case ,buff=0.02 )
self.play(MoveToTarget(__snake_case ) )
self.play(FadeOut(__snake_case ) )
a__ = Arrow(start=__snake_case ,end=__snake_case ,color=__snake_case ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,__snake_case ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a__ = MarkupText(
F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) )
a__ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(__snake_case ) ,Circumscribe(model_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_cpu_arr[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
a__ = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,__snake_case ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
a__ = AnimationGroup(
FadeOut(__snake_case ,run_time=0.5 ) ,MoveToTarget(__snake_case ,run_time=0.5 ) ,FadeIn(__snake_case ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(__snake_case )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a__ = 0.7
self.play(
Circumscribe(model_arr[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i] ,**__snake_case ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,Circumscribe(model_arr[i + 1] ,color=__snake_case ,**__snake_case ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(cpu_left_col_base[-1] ,color=__snake_case ,**__snake_case ) ,Circumscribe(gpu_rect[0] ,color=__snake_case ,**__snake_case ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
a__ = a_c
a__ = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(__snake_case ) ,FadeOut(__snake_case ,run_time=0.5 ) ,)
a__ = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ,run_time=3 ) ,MoveToTarget(__snake_case ) )
self.wait()
| 657 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case : Optional[int] = logging.get_logger(__name__)
snake_case : Any = '''▁'''
snake_case : Tuple = {'''vocab_file''': '''spiece.model'''}
snake_case : Tuple = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
snake_case : Optional[int] = {
'''google/reformer-crime-and-punishment''': 52_42_88,
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : List[Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self :List[str] ,__snake_case :Dict ,__snake_case :Dict="</s>" ,__snake_case :Union[str, Any]="<unk>" ,__snake_case :List[Any]=[] ,__snake_case :Optional[Dict[str, Any]] = None ,**__snake_case :Dict ,) -> None:
a__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__snake_case ,unk_token=__snake_case ,additional_special_tokens=__snake_case ,sp_model_kwargs=self.sp_model_kwargs ,**__snake_case ,)
a__ = vocab_file
a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__snake_case )
@property
def lowerCamelCase__( self :str ) -> Tuple:
return self.sp_model.get_piece_size()
def lowerCamelCase__( self :Any ) -> Dict[str, int]:
a__ = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self :Union[str, Any] ) -> str:
a__ = self.__dict__.copy()
a__ = None
return state
def __setstate__( self :Optional[Any] ,__snake_case :Any ) -> Optional[Any]:
a__ = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
a__ = {}
a__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :str ) -> List[str]:
return self.sp_model.encode(__snake_case ,out_type=__snake_case )
def lowerCamelCase__( self :Any ,__snake_case :List[Any] ) -> Optional[int]:
return self.sp_model.piece_to_id(__snake_case )
def lowerCamelCase__( self :Any ,__snake_case :Dict ) -> str:
if index < self.sp_model.get_piece_size():
a__ = self.sp_model.IdToPiece(__snake_case )
return token
def lowerCamelCase__( self :Any ,__snake_case :Union[str, Any] ) -> List[str]:
a__ = []
a__ = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__snake_case ) + token
a__ = []
else:
current_sub_tokens.append(__snake_case )
out_string += self.sp_model.decode(__snake_case )
return out_string.strip()
def lowerCamelCase__( self :Optional[int] ,__snake_case :str ,__snake_case :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
a__ = os.path.join(
__snake_case ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case ,'wb' ) as fi:
a__ = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
| 657 |
from math import pi
def __lowercase ( __lowerCAmelCase : int , __lowerCAmelCase : int ):
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
| 657 | 1 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
snake_case : List[Any] = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Tuple = '''ernie_m'''
UpperCAmelCase__ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self :int ,__snake_case :int = 25_00_02 ,__snake_case :int = 7_68 ,__snake_case :int = 12 ,__snake_case :int = 12 ,__snake_case :int = 30_72 ,__snake_case :str = "gelu" ,__snake_case :float = 0.1 ,__snake_case :float = 0.1 ,__snake_case :int = 5_14 ,__snake_case :float = 0.02 ,__snake_case :int = 1 ,__snake_case :float = 1E-05 ,__snake_case :str=None ,__snake_case :Optional[Any]=False ,__snake_case :Optional[Any]=0.0 ,**__snake_case :str ,) -> Dict:
super().__init__(pad_token_id=__snake_case ,**__snake_case )
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = initializer_range
a__ = layer_norm_eps
a__ = classifier_dropout
a__ = is_decoder
a__ = act_dropout
| 657 |
from math import sqrt
def __lowercase ( __lowerCAmelCase : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowercase ( __lowerCAmelCase : int = 1_0_0_0_1 ):
a__ = 0
a__ = 1
while count != nth and number < 3:
number += 1
if is_prime(__lowerCAmelCase ):
count += 1
while count != nth:
number += 2
if is_prime(__lowerCAmelCase ):
count += 1
return number
if __name__ == "__main__":
print(f"""{solution() = }""")
| 657 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def __lowercase ( __lowerCAmelCase : Union[str, Any] ):
a__ = SwinvaConfig()
a__ = swinva_name.split('_' )
a__ = name_split[1]
if "to" in name_split[3]:
a__ = int(name_split[3][-3:] )
else:
a__ = int(name_split[3] )
if "to" in name_split[2]:
a__ = int(name_split[2][-2:] )
else:
a__ = int(name_split[2][6:] )
if model_size == "tiny":
a__ = 9_6
a__ = (2, 2, 6, 2)
a__ = (3, 6, 1_2, 2_4)
elif model_size == "small":
a__ = 9_6
a__ = (2, 2, 1_8, 2)
a__ = (3, 6, 1_2, 2_4)
elif model_size == "base":
a__ = 1_2_8
a__ = (2, 2, 1_8, 2)
a__ = (4, 8, 1_6, 3_2)
else:
a__ = 1_9_2
a__ = (2, 2, 1_8, 2)
a__ = (6, 1_2, 2_4, 4_8)
if "to" in swinva_name:
a__ = (1_2, 1_2, 1_2, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
a__ = 2_1_8_4_1
a__ = 'huggingface/label-files'
a__ = 'imagenet-22k-id2label.json'
a__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
a__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
a__ = idalabel
a__ = {v: k for k, v in idalabel.items()}
else:
a__ = 1_0_0_0
a__ = 'huggingface/label-files'
a__ = 'imagenet-1k-id2label.json'
a__ = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
a__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
a__ = idalabel
a__ = {v: k for k, v in idalabel.items()}
a__ = img_size
a__ = num_classes
a__ = embed_dim
a__ = depths
a__ = num_heads
a__ = window_size
return config
def __lowercase ( __lowerCAmelCase : List[Any] ):
if "patch_embed.proj" in name:
a__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
a__ = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
a__ = 'encoder.' + name
if "attn.proj" in name:
a__ = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a__ = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a__ = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a__ = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a__ = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a__ = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
a__ = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
a__ = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
a__ = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
a__ = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if name == "norm.weight":
a__ = 'layernorm.weight'
if name == "norm.bias":
a__ = 'layernorm.bias'
if "head" in name:
a__ = name.replace('head' , 'classifier' )
else:
a__ = 'swinv2.' + name
return name
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ):
for key in orig_state_dict.copy().keys():
a__ = orig_state_dict.pop(__lowerCAmelCase )
if "mask" in key:
continue
elif "qkv" in key:
a__ = key.split('.' )
a__ = int(key_split[1] )
a__ = int(key_split[3] )
a__ = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a__ = val[:dim, :]
a__ = val[dim : dim * 2, :]
a__ = val[-dim:, :]
else:
a__ = val[:dim]
a__ = val[
dim : dim * 2
]
a__ = val[-dim:]
else:
a__ = val
return orig_state_dict
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] ):
a__ = timm.create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase )
timm_model.eval()
a__ = get_swinva_config(__lowerCAmelCase )
a__ = SwinvaForImageClassification(__lowerCAmelCase )
model.eval()
a__ = convert_state_dict(timm_model.state_dict() , __lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
a__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a__ = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) )
a__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
a__ = image_processor(images=__lowerCAmelCase , return_tensors='pt' )
a__ = timm_model(inputs['pixel_values'] )
a__ = model(**__lowerCAmelCase ).logits
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 )
print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCAmelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__lowerCAmelCase )
model.push_to_hub(
repo_path_or_name=Path(__lowerCAmelCase , __lowerCAmelCase ) , organization='nandwalritik' , commit_message='Add model' , )
if __name__ == "__main__":
snake_case : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
snake_case : Any = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 657 |
import unittest
from knapsack import greedy_knapsack as kp
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[Any] ) -> Union[str, Any]:
a__ = [10, 20, 30, 40, 50, 60]
a__ = [2, 4, 6, 8, 10, 12]
a__ = 1_00
self.assertEqual(kp.calc_profit(__snake_case ,__snake_case ,__snake_case ) ,2_10 )
def lowerCamelCase__( self :str ) -> Optional[int]:
self.assertRaisesRegex(__snake_case ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :Optional[Any] ) -> int:
self.assertRaisesRegex(__snake_case ,'Weight can not be negative.' )
def lowerCamelCase__( self :str ) -> List[str]:
self.assertRaisesRegex(__snake_case ,'Profit can not be negative.' )
def lowerCamelCase__( self :str ) -> Optional[Any]:
self.assertRaisesRegex(__snake_case ,'max_weight must greater than zero.' )
def lowerCamelCase__( self :int ) -> List[Any]:
self.assertRaisesRegex(
__snake_case ,'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 657 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
snake_case : Optional[Any] = random.Random()
def __lowercase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict=1.0 , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : List[Any]=None ):
if rng is None:
a__ = global_rng
a__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class snake_case_ (unittest.TestCase ):
def __init__( self :Any ,__snake_case :Tuple ,__snake_case :Optional[int]=7 ,__snake_case :List[Any]=4_00 ,__snake_case :Optional[int]=20_00 ,__snake_case :Tuple=24 ,__snake_case :Union[str, Any]=24 ,__snake_case :int=0.0 ,__snake_case :str=1_60_00 ,__snake_case :int=True ,__snake_case :Any=True ,) -> Optional[Any]:
a__ = parent
a__ = batch_size
a__ = min_seq_length
a__ = max_seq_length
a__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a__ = feature_size
a__ = num_mel_bins
a__ = padding_value
a__ = sampling_rate
a__ = return_attention_mask
a__ = do_normalize
def lowerCamelCase__( self :str ) -> Any:
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase__( self :List[Any] ,__snake_case :List[Any]=False ,__snake_case :Optional[int]=False ) -> Dict:
def _flatten(__snake_case :str ):
return list(itertools.chain(*__snake_case ) )
if equal_length:
a__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
a__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
a__ = [np.asarray(__snake_case ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class snake_case_ (lowerCamelCase_ , unittest.TestCase ):
UpperCAmelCase__ : str = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowerCamelCase__( self :Optional[Any] ) -> Dict:
a__ = SpeechaTextFeatureExtractionTester(self )
def lowerCamelCase__( self :int ,__snake_case :List[str] ) -> Optional[int]:
self.assertTrue(np.all(np.mean(__snake_case ,axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(__snake_case ,axis=0 ) - 1 ) < 1E-3 ) )
def lowerCamelCase__( self :List[Any] ) -> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a__ = [floats_list((1, x) )[0] for x in range(8_00 ,14_00 ,2_00 )]
a__ = [np.asarray(__snake_case ) for speech_input in speech_inputs]
# Test feature size
a__ = feature_extractor(__snake_case ,padding=__snake_case ,return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
a__ = feature_extractor(speech_inputs[0] ,return_tensors='np' ).input_features
a__ = feature_extractor(np_speech_inputs[0] ,return_tensors='np' ).input_features
self.assertTrue(np.allclose(__snake_case ,__snake_case ,atol=1E-3 ) )
# Test batched
a__ = feature_extractor(__snake_case ,return_tensors='np' ).input_features
a__ = feature_extractor(__snake_case ,return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(__snake_case ,__snake_case ):
self.assertTrue(np.allclose(__snake_case ,__snake_case ,atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
a__ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
a__ = np.asarray(__snake_case )
a__ = feature_extractor(__snake_case ,return_tensors='np' ).input_features
a__ = feature_extractor(__snake_case ,return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(__snake_case ,__snake_case ):
self.assertTrue(np.allclose(__snake_case ,__snake_case ,atol=1E-3 ) )
def lowerCamelCase__( self :List[str] ) -> str:
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ = [floats_list((1, x) )[0] for x in range(8_00 ,14_00 ,2_00 )]
a__ = ['longest', 'max_length', 'do_not_pad']
a__ = [None, 16, None]
for max_length, padding in zip(__snake_case ,__snake_case ):
a__ = feature_extractor(
__snake_case ,padding=__snake_case ,max_length=__snake_case ,return_attention_mask=__snake_case )
a__ = inputs.input_features
a__ = inputs.attention_mask
a__ = [np.sum(__snake_case ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowerCamelCase__( self :Dict ) -> Dict:
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ = [floats_list((1, x) )[0] for x in range(8_00 ,14_00 ,2_00 )]
a__ = ['longest', 'max_length', 'do_not_pad']
a__ = [None, 16, None]
for max_length, padding in zip(__snake_case ,__snake_case ):
a__ = feature_extractor(
__snake_case ,max_length=__snake_case ,padding=__snake_case ,return_tensors='np' ,return_attention_mask=__snake_case )
a__ = inputs.input_features
a__ = inputs.attention_mask
a__ = [np.sum(__snake_case ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowerCamelCase__( self :Optional[int] ) -> int:
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ = [floats_list((1, x) )[0] for x in range(8_00 ,14_00 ,2_00 )]
a__ = feature_extractor(
__snake_case ,padding='max_length' ,max_length=4 ,truncation=__snake_case ,return_tensors='np' ,return_attention_mask=__snake_case ,)
a__ = inputs.input_features
a__ = inputs.attention_mask
a__ = np.sum(attention_mask == 1 ,axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowerCamelCase__( self :Tuple ) -> List[str]:
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ = [floats_list((1, x) )[0] for x in range(8_00 ,14_00 ,2_00 )]
a__ = feature_extractor(
__snake_case ,padding='longest' ,max_length=4 ,truncation=__snake_case ,return_tensors='np' ,return_attention_mask=__snake_case ,)
a__ = inputs.input_features
a__ = inputs.attention_mask
a__ = np.sum(attention_mask == 1 ,axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape ,(3, 4, 24) )
a__ = [floats_list((1, x) )[0] for x in range(8_00 ,14_00 ,2_00 )]
a__ = feature_extractor(
__snake_case ,padding='longest' ,max_length=16 ,truncation=__snake_case ,return_tensors='np' ,return_attention_mask=__snake_case ,)
a__ = inputs.input_features
a__ = inputs.attention_mask
a__ = np.sum(attention_mask == 1 ,axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape ,(3, 6, 24) )
def lowerCamelCase__( self :str ) -> int:
import torch
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ = np.random.rand(1_00 ,32 ).astype(np.floataa )
a__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a__ = feature_extractor.pad([{'input_features': inputs}] ,return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
a__ = feature_extractor.pad([{'input_features': inputs}] ,return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowerCamelCase__( self :Dict ,__snake_case :List[str] ) -> Optional[Any]:
from datasets import load_dataset
a__ = load_dataset('hf-internal-testing/librispeech_asr_dummy' ,'clean' ,split='validation' )
# automatic decoding with librispeech
a__ = ds.sort('id' ).select(range(__snake_case ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowerCamelCase__( self :str ) -> List[str]:
# fmt: off
a__ = np.array([
-1.57_45, -1.77_13, -1.70_20, -1.60_69, -1.22_50, -1.11_05, -0.90_72, -0.82_41,
-1.23_10, -0.80_98, -0.33_20, -0.41_01, -0.79_85, -0.49_96, -0.82_13, -0.91_28,
-1.04_20, -1.12_86, -1.04_40, -0.79_99, -0.84_05, -1.22_75, -1.54_43, -1.46_25,
] )
# fmt: on
a__ = self._load_datasamples(1 )
a__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ = feature_extractor(__snake_case ,return_tensors='pt' ).input_features
self.assertEquals(input_features.shape ,(1, 5_84, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] ,__snake_case ,atol=1E-4 ) )
| 657 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Any=1_0 ):
a__ = []
for _ in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str]=1_0 ):
a__ = []
for step in range(__lowerCAmelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = os.path.join(__lowerCAmelCase , 'schedule.bin' )
torch.save(scheduler.state_dict() , __lowerCAmelCase )
a__ = torch.load(__lowerCAmelCase )
scheduler.load_state_dict(__lowerCAmelCase )
return lrs
@require_torch
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Optional[int] ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :Union[str, Any] ) -> int:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> str:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = AdamW(params=[w] ,lr=2E-1 ,weight_decay=0.0 )
for _ in range(1_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
def lowerCamelCase__( self :Tuple ) -> int:
a__ = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=__snake_case )
a__ = torch.tensor([0.4, 0.2, -0.5] )
a__ = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
a__ = Adafactor(
params=[w] ,lr=1E-2 ,eps=(1E-30, 1E-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,betaa=__snake_case ,weight_decay=0.0 ,relative_step=__snake_case ,scale_parameter=__snake_case ,warmup_init=__snake_case ,)
for _ in range(10_00 ):
a__ = criterion(__snake_case ,__snake_case )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1E-2 )
@require_torch
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : str = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
UpperCAmelCase__ : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
UpperCAmelCase__ : Optional[Any] = 1_0
def lowerCamelCase__( self :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Tuple ,__snake_case :int ,__snake_case :Any=None ) -> Optional[Any]:
self.assertEqual(len(__snake_case ) ,len(__snake_case ) )
for a, b in zip(__snake_case ,__snake_case ):
self.assertAlmostEqual(__snake_case ,__snake_case ,delta=__snake_case ,msg=__snake_case )
def lowerCamelCase__( self :Tuple ) -> List[Any]:
a__ = {'num_warmup_steps': 2, 'num_training_steps': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
a__ = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1E-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
for scheduler_func, data in scheds.items():
a__ , a__ = data
a__ = scheduler_func(self.optimizer ,**__snake_case )
self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 )
a__ = unwrap_schedule(__snake_case ,self.num_steps )
self.assertListAlmostEqual(
__snake_case ,__snake_case ,tol=1E-2 ,msg=F'failed for {scheduler_func} in normal scheduler' ,)
a__ = scheduler_func(self.optimizer ,**__snake_case )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(__snake_case ) # wrap to test picklability of the schedule
a__ = unwrap_and_save_reload_schedule(__snake_case ,self.num_steps )
self.assertListEqual(__snake_case ,__snake_case ,msg=F'failed for {scheduler_func} in save and reload' )
class snake_case_ :
def __init__( self :Tuple ,__snake_case :str ) -> Any:
a__ = fn
def __call__( self :List[str] ,*__snake_case :Optional[Any] ,**__snake_case :Optional[int] ) -> Union[str, Any]:
return self.fn(*__snake_case ,**__snake_case )
@classmethod
def lowerCamelCase__( self :Tuple ,__snake_case :Union[str, Any] ) -> Dict:
a__ = list(map(self ,scheduler.lr_lambdas ) )
| 657 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
snake_case : List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case : List[Any] = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
snake_case : str = {
'''google/electra-small-generator''': 5_12,
'''google/electra-base-generator''': 5_12,
'''google/electra-large-generator''': 5_12,
'''google/electra-small-discriminator''': 5_12,
'''google/electra-base-discriminator''': 5_12,
'''google/electra-large-discriminator''': 5_12,
}
snake_case : int = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : List[Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : int = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Dict = ElectraTokenizer
def __init__( self :Any ,__snake_case :Dict=None ,__snake_case :Optional[int]=None ,__snake_case :List[Any]=True ,__snake_case :Union[str, Any]="[UNK]" ,__snake_case :int="[SEP]" ,__snake_case :str="[PAD]" ,__snake_case :Tuple="[CLS]" ,__snake_case :List[str]="[MASK]" ,__snake_case :Tuple=True ,__snake_case :List[str]=None ,**__snake_case :int ,) -> int:
super().__init__(
__snake_case ,tokenizer_file=__snake_case ,do_lower_case=__snake_case ,unk_token=__snake_case ,sep_token=__snake_case ,pad_token=__snake_case ,cls_token=__snake_case ,mask_token=__snake_case ,tokenize_chinese_chars=__snake_case ,strip_accents=__snake_case ,**__snake_case ,)
a__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,__snake_case ) != do_lower_case
or normalizer_state.get('strip_accents' ,__snake_case ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,__snake_case ) != tokenize_chinese_chars
):
a__ = getattr(__snake_case ,normalizer_state.pop('type' ) )
a__ = do_lower_case
a__ = strip_accents
a__ = tokenize_chinese_chars
a__ = normalizer_class(**__snake_case )
a__ = do_lower_case
def lowerCamelCase__( self :Dict ,__snake_case :Optional[Any] ,__snake_case :List[str]=None ) -> Union[str, Any]:
a__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase__( self :Any ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ) -> List[int]:
a__ = [self.sep_token_id]
a__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__( self :Tuple ,__snake_case :str ,__snake_case :Optional[str] = None ) -> Tuple[str]:
a__ = self._tokenizer.model.save(__snake_case ,name=__snake_case )
return tuple(__snake_case )
| 657 |
from __future__ import annotations
def __lowercase ( __lowerCAmelCase : list[int] ): # This function is recursive
a__ = len(__lowerCAmelCase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
a__ = array[0]
a__ = False
a__ = 1
a__ = []
while not is_found and i < array_length:
if array[i] < pivot:
a__ = True
a__ = [element for element in array[i:] if element >= array[i]]
a__ = longest_subsequence(__lowerCAmelCase )
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
a__ = temp_array
else:
i += 1
a__ = [element for element in array[1:] if element >= pivot]
a__ = [pivot, *longest_subsequence(__lowerCAmelCase )]
if len(__lowerCAmelCase ) > len(__lowerCAmelCase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 657 | 1 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowercase ( *__lowerCAmelCase : List[Any] ):
with open(__lowerCAmelCase , 'r' ) as fh:
fcntl.flock(__lowerCAmelCase , fcntl.LOCK_EX )
try:
print(*__lowerCAmelCase )
finally:
fcntl.flock(__lowerCAmelCase , fcntl.LOCK_UN )
snake_case : Dict = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
snake_case : str = torch.device('''cuda''', local_rank)
snake_case : int = socket.gethostname()
snake_case : List[str] = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
snake_case : List[Any] = dist.get_rank()
snake_case : List[str] = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 657 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case : Dict = logging.get_logger(__name__)
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = ['''pixel_values''']
def __init__( self :Optional[Any] ,__snake_case :bool = True ,__snake_case :int = 32 ,__snake_case :Union[str, Any]=PILImageResampling.BILINEAR ,__snake_case :bool = True ,**__snake_case :Tuple ,) -> None:
a__ = do_resize
a__ = do_rescale
a__ = size_divisor
a__ = resample
super().__init__(**__snake_case )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :np.ndarray ,__snake_case :int ,__snake_case :Tuple ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :List[Any] ) -> np.ndarray:
a__ , a__ = get_image_size(__snake_case )
# Rounds the height and width down to the closest multiple of size_divisor
a__ = height // size_divisor * size_divisor
a__ = width // size_divisor * size_divisor
a__ = resize(__snake_case ,(new_h, new_w) ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
return image
def lowerCamelCase__( self :List[str] ,__snake_case :np.ndarray ,__snake_case :float ,__snake_case :Optional[ChannelDimension] = None ,**__snake_case :str ) -> np.ndarray:
return rescale(image=__snake_case ,scale=__snake_case ,data_format=__snake_case ,**__snake_case )
def lowerCamelCase__( self :Tuple ,__snake_case :Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] ,__snake_case :Optional[bool] = None ,__snake_case :Optional[int] = None ,__snake_case :Union[str, Any]=None ,__snake_case :Optional[bool] = None ,__snake_case :Optional[Union[TensorType, str]] = None ,__snake_case :ChannelDimension = ChannelDimension.FIRST ,**__snake_case :List[Any] ,) -> BatchFeature:
a__ = do_resize if do_resize is not None else self.do_resize
a__ = do_rescale if do_rescale is not None else self.do_rescale
a__ = size_divisor if size_divisor is not None else self.size_divisor
a__ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
a__ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
a__ = [to_numpy_array(__snake_case ) for img in images]
if do_resize:
a__ = [self.resize(__snake_case ,size_divisor=__snake_case ,resample=__snake_case ) for image in images]
if do_rescale:
a__ = [self.rescale(__snake_case ,scale=1 / 2_55 ) for image in images]
a__ = [to_channel_dimension_format(__snake_case ,__snake_case ) for image in images]
a__ = {'pixel_values': images}
return BatchFeature(data=__snake_case ,tensor_type=__snake_case )
| 657 | 1 |
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('''Googling.....''')
snake_case : Union[str, Any] = '''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:])
snake_case : Tuple = requests.get(url, headers={'''UserAgent''': UserAgent().random})
# res.raise_for_status()
with open('''project1a.html''', '''wb''') as out_file: # only for knowing the class
for data in res.iter_content(1_00_00):
out_file.write(data)
snake_case : int = BeautifulSoup(res.text, '''html.parser''')
snake_case : Tuple = list(soup.select('''.eZt8xd'''))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(f"""https://google.com{link.get("href")}""")
| 657 |
def __lowercase ( __lowerCAmelCase : int ):
a__ = generate_pascal_triangle(__lowerCAmelCase )
for row_idx in range(__lowerCAmelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=' ' )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=' ' )
else:
print(triangle[row_idx][col_idx] , end='' )
print()
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = []
for current_row_idx in range(__lowerCAmelCase ):
a__ = populate_current_row(__lowerCAmelCase , __lowerCAmelCase )
triangle.append(__lowerCAmelCase )
return triangle
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : int ):
a__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
a__ , a__ = 1, 1
for current_col_idx in range(1 , __lowerCAmelCase ):
calculate_current_element(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return current_row
def __lowercase ( __lowerCAmelCase : list[list[int]] , __lowerCAmelCase : list[int] , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
a__ = triangle[current_row_idx - 1][current_col_idx - 1]
a__ = triangle[current_row_idx - 1][current_col_idx]
a__ = above_to_left_elt + above_to_right_elt
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
a__ = [[1]]
for row_index in range(1 , __lowerCAmelCase ):
a__ = [0] + result[-1] + [0]
a__ = row_index + 1
# Calculate the number of distinct elements in a row
a__ = sum(divmod(__lowerCAmelCase , 2 ) )
a__ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
a__ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
a__ = row_first_half + row_second_half
result.append(__lowerCAmelCase )
return result
def __lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__lowerCAmelCase : Callable , __lowerCAmelCase : int ) -> None:
a__ = F'{func.__name__}({value})'
a__ = timeit(F'__main__.{call}' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'{call:38} -- {timing:.4f} seconds' )
for value in range(1_5 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__lowerCAmelCase , __lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 657 | 1 |
import qiskit
def __lowercase ( __lowerCAmelCase : int = 2 ):
a__ = qubits
# Using Aer's simulator
a__ = qiskit.Aer.get_backend('aer_simulator' )
# Creating a Quantum Circuit acting on the q register
a__ = qiskit.QuantumCircuit(__lowerCAmelCase , __lowerCAmelCase )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , __lowerCAmelCase ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , __lowerCAmelCase )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(__lowerCAmelCase ) ) , list(range(__lowerCAmelCase ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
a__ = qiskit.execute(__lowerCAmelCase , __lowerCAmelCase , shots=1_0_0_0 )
return job.result().get_counts(__lowerCAmelCase )
if __name__ == "__main__":
print(f"""Total count for various states are: {quantum_entanglement(3)}""")
| 657 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
snake_case : str = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
snake_case : str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
snake_case : Tuple = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
snake_case : str = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
snake_case : Tuple = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
snake_case : int = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def __lowercase ( ):
a__ , a__ = randrange(len(__lowerCAmelCase ) ), randrange(len(__lowerCAmelCase ) )
a__ = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
a__ , a__ = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowercase ( __lowerCAmelCase : int = 1_0_0 ):
return (generate_random_hand() for _ in range(__lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ):
assert PokerHand(__lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict ):
a__ = PokerHand(__lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , __lowerCAmelCase )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowercase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
assert PokerHand(__lowerCAmelCase ).compare_with(PokerHand(__lowerCAmelCase ) ) == expected
def __lowercase ( ):
a__ = [PokerHand(__lowerCAmelCase ) for hand in SORTED_HANDS]
a__ = poker_hands.copy()
shuffle(__lowerCAmelCase )
a__ = chain(sorted(__lowerCAmelCase ) )
for index, hand in enumerate(__lowerCAmelCase ):
assert hand == poker_hands[index]
def __lowercase ( ):
# Test that five high straights are compared correctly.
a__ = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=__lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowercase ( ):
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
a__ = PokerHand('2C 4S AS 3D 5C' )
a__ = True
a__ = [5, 4, 3, 2, 1_4]
for _ in range(1_0 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowercase ( ):
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
a__ = 0
a__ = os.path.abspath(os.path.dirname(__lowerCAmelCase ) )
a__ = os.path.join(__lowerCAmelCase , 'poker_hands.txt' )
with open(__lowerCAmelCase ) as file_hand:
for line in file_hand:
a__ = line[:1_4].strip()
a__ = line[1_5:].strip()
a__ , a__ = PokerHand(__lowerCAmelCase ), PokerHand(__lowerCAmelCase )
a__ = player.compare_with(__lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 3_7_6
| 657 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.