| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends


# NOTE: the original class names were anonymized in this dump; the placeholder
# names below are illustrative, but the DummyObject pattern itself is real.
class SpeechDummyObjectA(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["speech"])


class SpeechDummyObjectB(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["speech"])
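# Self-contained sketch of the DummyObject/requires_backends pattern used above:
# instantiation fails until the optional backend is installed. The error text
# here is an assumption; the fail-at-instantiation behavior is the point.
def _demo_dummy_object():
    class _DummyObject(type):
        def __call__(cls, *args, **kwargs):
            raise ImportError(f"{cls.__name__} requires the following backends: {', '.join(cls._backends)}")

    class FakeSpeechFeatureExtractor(metaclass=_DummyObject):
        _backends = ["speech"]

    try:
        FakeSpeechFeatureExtractor()
    except ImportError as err:
        print(err)  # FakeSpeechFeatureExtractor requires the following backends: speech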
"""simple docstring"""
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """simple docstring"""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()

    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        """simple docstring"""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        """simple docstring"""
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
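# `SearchProblem` is imported from `hill_climbing` and not shown here. The class
# below is an illustrative stand-in; the 4-neighbor grid layout is an assumption
# based only on how the annealer uses its argument (`score()`, `get_neighbors()`,
# and the `x`/`y` attributes).
class ToySearchProblem:
    def __init__(self, x, y, step_size, function_to_optimize):
        self.x = x
        self.y = y
        self.step_size = step_size
        self.function = function_to_optimize

    def score(self):
        # evaluate the objective at the current coordinates
        return self.function(self.x, self.y)

    def get_neighbors(self):
        # one step in each axis direction
        s = self.step_size
        return [
            ToySearchProblem(self.x + dx, self.y + dy, s, self.function)
            for dx, dy in ((s, 0), (-s, 0), (0, s), (0, -s))
        ]


# Example: simulated_annealing(ToySearchProblem(12, 47, 1, lambda x, y: x**2 + y**2),
#          find_max=False).score() approaches the minimum of f within the bounds.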
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4
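# Usage sketch of the config class above; the keyword values are illustrative,
# and the ValueError branch is the one guarded against in __init__.
def _demo_mobilenet_v1_config():
    config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
    assert config.model_type == "mobilenet_v1"
    try:
        MobileNetV1Config(depth_multiplier=0)
    except ValueError as err:
        print(err)  # depth_multiplier must be greater than zero.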
"""simple docstring"""
import argparse
import json
import os

import torch
from torch import nn

from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME


def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    """simple docstring"""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    """simple docstring"""
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    # reuse the decoder token embedding as the shared embedding (assignment target
    # reconstructed; it was anonymized in this dump)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
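# To make the key mapping concrete, a small demo of rename_fairseq_keys on a toy
# state dict; the key names and tensor values here are arbitrary examples.
def _demo_rename_fairseq_keys():
    toy = {
        "layers.0.moe_layer.experts.0.fc1.weight": torch.zeros(2, 2),
        "layers.0.moe_layer.gate.wg.weight": torch.zeros(2, 2),
    }
    renamed = rename_fairseq_keys(toy, expert_idx=3)
    print(sorted(renamed))
    # ['layers.0.ffn.experts.expert_3.fc1.weight', 'layers.0.ffn.router.classifier.weight']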
"""simple docstring"""
import argparse
import random

import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel


def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """simple docstring"""
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """simple docstring"""
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """simple docstring"""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file", type=str, default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=100, type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument("--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq")
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument("--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) ")
    parser.add_argument(
        "--eval_interval", default=10, type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument("--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data")
    parser.add_argument("--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set")
    parser.add_argument("--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner")
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
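# The batch filter inside `finetune` distills to a few lines. This sketch uses
# names of my own choosing but mirrors the loop's behavior: once global_step
# reaches 10, the threshold drops to -1 and every context passes; before that,
# only contexts whose predicted information gain clears the threshold are kept.
def _keep_context(predicted_q, global_step, threshold=1.0):
    if global_step >= 10:
        threshold = -1
    return predicted_q >= threshold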
"""simple docstring"""
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None  # avoids an unbound name when the deprecated kwarg is absent
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """simple docstring"""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """simple docstring"""
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """simple docstring"""
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
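# Hedged usage sketch of the processor above. The checkpoint name is an
# assumption, and OCR (apply_ocr=True, the default) requires pytesseract;
# with OCR enabled only the image needs to be passed.
def _demo_layoutxlm_processor():
    from PIL import Image
    from transformers import LayoutXLMProcessor

    processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
    image = Image.open("document.png").convert("RGB")
    encoding = processor(image, return_tensors="pt")
    print(list(encoding.keys()))  # e.g. ['input_ids', 'attention_mask', 'bbox', 'image']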
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path


git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))

import dataclasses  # noqa
import io  # noqa
import itertools  # noqa
import json  # noqa
import os  # noqa
import unittest  # noqa
from copy import deepcopy  # noqa

from parameterized import parameterized  # noqa
from transformers import TrainingArguments, is_torch_available  # noqa
from transformers.deepspeed import is_deepspeed_available  # noqa
from transformers.file_utils import WEIGHTS_NAME  # noqa
from transformers.testing_utils import (  # noqa
    CaptureLogger,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import set_seed  # noqa


set_seed(42)

models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    """simple docstring"""
    # include both params in the sub-test name (by default only the first one shows up)
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))


@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    # NOTE: the test-method names and the distributed/fp16 boolean arguments below were
    # anonymized in this dump; they are reconstructed from the decorators and call pattern.
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        """simple docstring"""
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        """simple docstring"""
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        """simple docstring"""
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        """simple docstring"""
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        """simple docstring"""
        # run_asr saves no artifacts yet, so success of the subprocess is the test
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
    ):
        """simple docstring"""
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        """simple docstring"""
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=True)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        """simple docstring"""
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
"""simple docstring"""
import unittest

import torch

from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        """simple docstring"""
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        """simple docstring"""
        return (3, 32, 32)

    @property
    def output_shape(self):
        """simple docstring"""
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        """simple docstring"""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        """simple docstring"""
        pass

    def test_training(self):
        """simple docstring"""
        pass

    def test_from_pretrained_hub(self):
        """simple docstring"""
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        """simple docstring"""
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
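# Sketch of constructing the tiny VQModel exercised by the tests above and
# running one forward pass; the init kwargs mirror the dict in
# prepare_init_args_and_inputs_for_common, and the output shape is expected
# to match the input since in/out/latent channels are all 3.
def _demo_vq_model():
    model = VQModel(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=3,
    )
    sample = torch.randn(4, 3, 32, 32)
    with torch.no_grad():
        out = model(sample).sample
    print(out.shape)  # torch.Size([4, 3, 32, 32])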
def gray_code_sequence(bit_count: int) -> list:
    """simple docstring"""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """simple docstring"""
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
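# Worked example for 2 bits: adjacent codes in the result differ in exactly one
# bit, which is the defining property of a Gray code.
def _demo_gray_code():
    print(gray_code_sequence_string(2))  # ['00', '01', '11', '10']
    print(gray_code_sequence(2))         # [0, 1, 3, 2]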
'''simple docstring'''
import datasets
from .evaluate import evaluate
a_ : List[Any] = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
a_ : List[Any] = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
a_ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        """simple docstring"""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
'''simple docstring'''
import re
import string
import numpy as np
import datasets
a_ : Tuple = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
a_ : Optional[int] = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all digits before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
a_ : Tuple = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """simple docstring"""
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(lowerCamelCase__ )
class __UpperCamelCase :
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
elif titles is None or texts is None:
lowerCamelCase_ =titles if texts is None else texts
return super().__call__(
lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
lowerCamelCase_ =titles if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [titles]
lowerCamelCase_ =texts if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [texts]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =questions if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [questions] * n_passages
assert len(lowerCAmelCase ) == len(
lowerCAmelCase ), f'''There should be as many titles than texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.'''
lowerCamelCase_ =super().__call__(lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ =super().__call__(lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ ={
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase, lowerCAmelCase )
]
}
if return_attention_mask is not False:
lowerCamelCase_ =[]
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCamelCase_ =attention_mask
return self.pad(lowerCAmelCase, padding=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 16, lowerCAmelCase = 64, lowerCAmelCase = 4, ):
"""simple docstring"""
lowerCamelCase_ =reader_input['''input_ids''']
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =reader_output[:3]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =sorted(range(lowerCAmelCase ), reverse=lowerCAmelCase, key=relevance_logits.__getitem__ )
lowerCamelCase_ =[]
for doc_id in sorted_docs:
lowerCamelCase_ =list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCamelCase_ =sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCamelCase_ =sequence_ids.index(self.pad_token_id )
else:
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCAmelCase, top_spans=lowerCAmelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCAmelCase, start_index=lowerCAmelCase, end_index=lowerCAmelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
if len(lowerCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =[]
for start_index, start_score in enumerate(lowerCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCamelCase_ =sorted(lowerCAmelCase, key=lambda x : x[1], reverse=lowerCAmelCase )
lowerCamelCase_ =[]
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
lowerCamelCase_ =end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCAmelCase ) == top_spans:
break
return chosen_span_intervals
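# Standalone sketch of the span selection above (toy logits; names are ours,
# this is not the DPR implementation): score every (start, end) pair within
# max_answer_length, sort by total score, then keep spans that neither
# contain nor are contained in an already chosen span.
def _toy_best_spans(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = []
    for start, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), start_score + end_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _score in scores:
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue  # contains / is contained in a higher-scoring span
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

assert _toy_best_spans([0.1, 0.9, 0.2], [0.3, 0.8, 0.4]) == [(1, 1), (2, 2)]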
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
lowercase : int =VOCAB_FILES_NAMES
lowercase : Tuple =READER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =READER_PRETRAINED_INIT_CONFIGURATION
lowercase : int =['input_ids', 'attention_mask']
lowercase : Dict =DPRReaderTokenizer
| 6 | 0 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a_ : str = logging.get_logger(__name__)
a_ : List[Any] = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
a_ : Optional[Any] = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
a_ : List[Any] = {
"""facebook/blenderbot_small-90M""": 5_12,
}
class __UpperCamelCase ( snake_case_ ):
lowercase : int =VOCAB_FILES_NAMES
lowercase : Any =PRETRAINED_VOCAB_FILES_MAP
lowercase : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Any =BlenderbotSmallTokenizer
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase="<|endoftext|>", lowerCAmelCase="<|endoftext|>", lowerCAmelCase="<|endoftext|>", lowerCAmelCase=False, lowerCAmelCase=True, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=lowerCAmelCase, merges=lowerCAmelCase, add_prefix_space=lowerCAmelCase, trim_offsets=lowerCAmelCase, ), bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, **lowerCAmelCase, )
lowerCamelCase_ =add_prefix_space
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
"""simple docstring"""
lowerCamelCase_ =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
lowerCamelCase_ =[self.sep_token_id]
lowerCamelCase_ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
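# Toy illustration of the two helpers above (hypothetical token ids, not the
# real Blenderbot vocab): special tokens wrap each sequence, and the token
# type ids stay all zeros however many sequences are passed.
def _toy_build_inputs(ids_a, ids_b=None, bos=0, eos=2):
    out = [bos] + ids_a + [eos]
    if ids_b is None:
        return out
    return out + [eos] + ids_b + [eos]

assert _toy_build_inputs([5, 6]) == [0, 5, 6, 2]
assert _toy_build_inputs([5, 6], [7]) == [0, 5, 6, 2, 2, 7, 2]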
| 352 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def a_ ( ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ ={
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
lowerCamelCase_ =Dataset.from_dict(__snake_case )
return dataset
class __UpperCamelCase ( lowerCamelCase__ ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =get_dataset()
lowerCamelCase_ =make_duplicate_clusters(lowerCAmelCase, 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ), 2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =get_dataset()
lowerCamelCase_, lowerCamelCase_ =deduplicate_dataset(lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ), 2 )
print(lowerCAmelCase )
self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], lowerCAmelCase )
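# The test above exercises near-duplicate clustering. A minimal sketch of the
# underlying idea using exact Jaccard similarity over word sets (the real
# pipeline uses MinHash as a fast approximation; helper names are ours):
def _jaccard(a: str, b: str) -> float:
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb) if sa | sb else 0.0

def _toy_clusters(docs, threshold=0.85):
    clusters = []
    for doc in docs:
        for cluster in clusters:
            if _jaccard(doc, cluster[0]) >= threshold:
                cluster.append(doc)
                break
        else:
            clusters.append([doc])
    return clusters

# "a " * 20 and "a " * 30 reduce to the same word set -> one cluster, while
# "b " * 7 starts its own, giving the 2 clusters seen in the fixture above.
assert len(_toy_clusters(["a " * 20, "a " * 30, "b " * 7])) == 2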
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
a_ : Tuple = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = ["""SpeechEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[Any] = ["""FlaxSpeechEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
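# _LazyModule defers the imports above until an attribute is first accessed.
# A minimal sketch of the same idea via PEP 562 module-level __getattr__
# (a simplified stand-in, not the actual _LazyModule implementation):
import importlib

_LAZY_ATTRS = {"SpeechEncoderDecoderModel": ".modeling_speech_encoder_decoder"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], package=__package__)
        return getattr(module, name)  # imported only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")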
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a_ : Any = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 6 | 0 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. Compare this to taking a full-size model and reducing its layers and emb
# dimensions to the minimum while keeping the full vocab + merges files, which leads to ~3MB
# in total for all files. The latter approach is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
a_ : Optional[Any] = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
a_ : Dict = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
a_ : Any = dict(zip(vocab, range(len(vocab))))
a_ : str = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
with tempfile.TemporaryDirectory() as tmpdirname:
a_ : Dict = Path(tmpdirname)
a_ : Any = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
a_ : List[str] = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
a_ : Tuple = build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
a_ : Dict = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
a_ : Any = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
a_ : Optional[int] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
a_ : Any = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
a_ : Union[str, Any] = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 354 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def a_ ( __snake_case : int = 150_0000 ) -> int:
"""simple docstring"""
lowerCamelCase_ =defaultdict(__snake_case )
lowerCamelCase_ =2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __snake_case , 2 ):
if gcd(__snake_case , __snake_case ) > 1:
continue
lowerCamelCase_ =2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__snake_case , limit + 1 , __snake_case ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"""{solution() = }""")
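# Sanity check of Euclid's formula driving the loop above: for coprime m > n
# of opposite parity, (m*m - n*n, 2*m*n, m*m + n*n) is a primitive
# Pythagorean triple whose perimeter simplifies to 2*m*(m + n).
m, n = 2, 1
a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
assert (a, b, c) == (3, 4, 5)
assert a * a + b * b == c * c
assert a + b + c == 2 * m * (m + n)  # perimeter 12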
| 6 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCamelCase ( snake_case_ , unittest.TestCase ):
lowercase : List[str] =XLMTokenizer
lowercase : Union[str, Any] =False
def lowercase__ ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ =[
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) )
lowerCamelCase_ =['l o 123', 'lo w 1456', 'e r</w> 1789', '']
lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file, '''w''' ) as fp:
fp.write(json.dumps(lowerCAmelCase ) )
with open(self.merges_file, '''w''' ) as fp:
fp.write('''\n'''.join(lowerCAmelCase ) )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ ='lower newer'
lowerCamelCase_ ='lower newer'
return input_text, output_text
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =XLMTokenizer(self.vocab_file, self.merges_file )
lowerCamelCase_ ='lower'
lowerCamelCase_ =['low', 'er</w>']
lowerCamelCase_ =tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =tokens + ['<unk>']
lowerCamelCase_ =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ), lowerCAmelCase )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
lowerCamelCase_ =tokenizer.encode('''sequence builders''', add_special_tokens=lowerCAmelCase )
lowerCamelCase_ =tokenizer.encode('''multi-sequence build''', add_special_tokens=lowerCAmelCase )
lowerCamelCase_ =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
lowerCamelCase_ =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase, lowerCAmelCase )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
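# How the merges fixture above turns "lower" into ["low", "er</w>"]: start
# from characters (with </w> marking the word end) and apply the ranked
# merges. Real BPE re-scans for the best-ranked pair at each step; applying
# the ranked list in order is enough for this toy vocab.
_MERGES = [("l", "o"), ("lo", "w"), ("e", "r</w>")]

def _toy_bpe(word):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for left, right in _MERGES:
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == left and symbols[i + 1] == right:
                symbols[i : i + 2] = [left + right]
            else:
                i += 1
    return symbols

assert _toy_bpe("lower") == ["low", "er</w>"]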
| 355 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ : Tuple = 16
a_ : Optional[int] = 32
def a_ ( __snake_case : Accelerator , __snake_case : int = 16 ) -> str:
"""simple docstring"""
lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowerCamelCase_ =load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__snake_case : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ =datasets.map(
__snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ =tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__snake_case : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ =16
elif accelerator.mixed_precision != "no":
lowerCamelCase_ =8
else:
lowerCamelCase_ =None
return tokenizer.pad(
__snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowerCamelCase_ =DataLoader(
tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
lowerCamelCase_ =DataLoader(
tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
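# Quick check of the pad-to-multiple rule used in collate_fn above: a longest
# sequence of 13 tokens is padded up to 16 for a multiple of 8 (mixed
# precision) and likewise for a multiple of 16 (fp8). Helper name is ours,
# not part of the script.
def _padded_length(longest: int, multiple: int) -> int:
    return -(-longest // multiple) * multiple  # ceiling division

assert _padded_length(13, 8) == 16
assert _padded_length(13, 16) == 16
assert _padded_length(16, 8) == 16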
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ : Tuple = mocked_dataloaders # noqa: F811
def a_ ( __snake_case : List[str] , __snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __snake_case ) == "1":
lowerCamelCase_ =2
# Initialize accelerator
lowerCamelCase_ =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ =config['''lr''']
lowerCamelCase_ =int(config['''num_epochs'''] )
lowerCamelCase_ =int(config['''seed'''] )
lowerCamelCase_ =int(config['''batch_size'''] )
lowerCamelCase_ =evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We can now define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__snake_case )
def inner_training_loop(__snake_case : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ =model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ =AdamW(params=model.parameters() , lr=__snake_case )
lowerCamelCase_, lowerCamelCase_ =get_dataloaders(__snake_case , __snake_case )
# Instantiate scheduler
lowerCamelCase_ =get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.loss
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.logits.argmax(dim=-1 )
lowerCamelCase_, lowerCamelCase_ =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
lowerCamelCase_ =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __snake_case )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a_ ( ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowerCamelCase_ =parser.parse_args()
lowerCamelCase_ ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
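# The strategy behind @find_executable_batch_size, as a hand-rolled stand-in
# (not accelerate's implementation): run the training function at the
# starting batch size and halve it whenever an out-of-memory error escapes.
def _run_with_shrinking_batch(train_fn, starting_batch_size=16):
    batch_size = starting_batch_size
    while batch_size > 0:
        try:
            return train_fn(batch_size)
        except RuntimeError as err:
            if "out of memory" not in str(err).lower():
                raise  # unrelated failure, surface it
            batch_size //= 2  # retry with half the batch
    raise RuntimeError("No executable batch size found")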
| 6 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def a_ ( __snake_case : NDArray[floataa] , __snake_case : NDArray[floataa] , __snake_case : list[int] , __snake_case : int , ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =coefficient_matrix.shape
lowerCamelCase_ =constant_matrix.shape
if rowsa != colsa:
lowerCamelCase_ =F'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
raise ValueError(__snake_case )
if colsa != 1:
lowerCamelCase_ =F'''Constant matrix must be nx1 but received {rowsa}x{colsa}'''
raise ValueError(__snake_case )
if rowsa != rowsa:
lowerCamelCase_ =(
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
F'''received {rowsa}x{colsa} and {rowsa}x{colsa}'''
)
raise ValueError(__snake_case )
if len(__snake_case ) != rowsa:
lowerCamelCase_ =(
'''Number of initial values must be equal to number of rows in coefficient '''
F'''matrix but received {len(__snake_case )} and {rowsa}'''
)
raise ValueError(__snake_case )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
lowerCamelCase_ =np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
lowerCamelCase_ =table.shape
strictly_diagonally_dominant(__snake_case )
# Iterates the whole matrix for given number of times
for _ in range(__snake_case ):
lowerCamelCase_ =[]
for row in range(__snake_case ):
lowerCamelCase_ =0
for col in range(__snake_case ):
if col == row:
lowerCamelCase_ =table[row][col]
elif col == cols - 1:
lowerCamelCase_ =table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
lowerCamelCase_ =(temp + val) / denom
new_val.append(__snake_case )
lowerCamelCase_ =new_val
return [float(__snake_case ) for i in new_val]
def a_ ( __snake_case : NDArray[floataa] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =table.shape
lowerCamelCase_ =True
for i in range(0 , __snake_case ):
lowerCamelCase_ =0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
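# Worked example of the Jacobi sweep implemented above, on a strictly
# diagonally dominant 2x2 system (4x + y = 9, x + 3y = 5; exact solution
# x = 2, y = 1), written with numpy for brevity:
import numpy as np

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([9.0, 5.0])
x = np.zeros(2)
R = A - np.diag(np.diag(A))  # off-diagonal part
for _ in range(25):
    x = (b - R @ x) / np.diag(A)  # x_i <- (b_i - sum_{j!=i} a_ij x_j) / a_ii
print(x)  # converges towards [2. 1.]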
| 356 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
a_ : List[str] = """src/diffusers"""
# Matches is_xxx_available()
a_ : int = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
a_ : List[str] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
a_ : Optional[Any] = """
{0} = None
"""
a_ : List[Any] = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
a_ : Optional[Any] = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def a_ ( __snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =_re_backend.findall(__snake_case )
if len(__snake_case ) == 0:
return None
return "_and_".join(__snake_case )
def a_ ( ) -> Optional[int]:
"""simple docstring"""
with open(os.path.join(__snake_case , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.readlines()
# Get to the point we do the actual imports for type checking
lowerCamelCase_ =0
lowerCamelCase_ ={}
# Go through the end of the file
while line_index < len(__snake_case ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowerCamelCase_ =find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('''else:''' ):
line_index += 1
line_index += 1
lowerCamelCase_ =[]
# Until we unindent, add backend objects to the list
while line_index < len(__snake_case ) and len(lines[line_index] ) > 1:
lowerCamelCase_ =lines[line_index]
lowerCamelCase_ =_re_single_line_import.search(__snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__snake_case ) > 0:
lowerCamelCase_ =objects
else:
line_index += 1
return backend_specific_objects
def a_ ( __snake_case : Dict , __snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(__snake_case )
elif name.islower():
return DUMMY_FUNCTION.format(__snake_case , __snake_case )
else:
return DUMMY_CLASS.format(__snake_case , __snake_case )
def a_ ( __snake_case : Tuple=None ) -> List[str]:
"""simple docstring"""
if backend_specific_objects is None:
lowerCamelCase_ =read_init()
# For special correspondence backend to module name as used in the function requires_modulename
lowerCamelCase_ ={}
for backend, objects in backend_specific_objects.items():
lowerCamelCase_ ='''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']'''
lowerCamelCase_ ='''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__snake_case , __snake_case ) for o in objects] )
lowerCamelCase_ =dummy_file
return dummy_files
def a_ ( __snake_case : Dict=False ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
lowerCamelCase_ ={'''torch''': '''pt'''}
# Locate actual dummy modules and read their content.
lowerCamelCase_ =os.path.join(__snake_case , '''utils''' )
lowerCamelCase_ ={
backend: os.path.join(__snake_case , F'''dummy_{short_names.get(__snake_case , __snake_case )}_objects.py''' )
for backend in dummy_files.keys()
}
lowerCamelCase_ ={}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__snake_case ):
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.read()
else:
lowerCamelCase_ =''''''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py as the main '''
'''__init__ has new objects.''' )
with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'''The main __init__ has objects that are not present in '''
F'''diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py. Run `make fix-copies` '''
'''to fix this.''' )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
a_ : Tuple = parser.parse_args()
check_dummies(args.fix_and_overwrite)
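# The DUMMY_CLASS template above leans on a DummyObject metaclass that fails
# loudly when a backend is missing. Simplified sketch of that pattern (not
# the diffusers implementation):
class _ToyDummyObject(type):
    def __getattr__(cls, name):
        if name.startswith("_"):  # leave private probes alone
            raise AttributeError(name)
        raise ImportError(f"{cls.__name__} requires the backends {cls._backends}")

class _ToyPipeline(metaclass=_ToyDummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the backends {self._backends}")

# Both _ToyPipeline() and _ToyPipeline.from_pretrained(...) raise ImportError.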
| 6 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : Tuple =IFImgaImgSuperResolutionPipeline
lowercase : int =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
lowercase : Any =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
lowercase : Optional[int] =PipelineTesterMixin.required_optional_params - {'latents'}
def lowercase__ ( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
if str(lowercase__ ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(lowercase__ )
else:
lowerCamelCase_ =torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
lowerCamelCase_ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowercase__ ) ).to(lowercase__ )
lowerCamelCase_ =floats_tensor((1, 3, 16, 16), rng=random.Random(lowercase__ ) ).to(lowercase__ )
lowerCamelCase_ ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
def lowercase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowercase__ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''', reason='''float16 requires CUDA''' )
def lowercase__ ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase__ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase__ ( self ):
"""simple docstring"""
self._test_save_load_local()
def lowercase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2, )
| 357 |
'''simple docstring'''
a_ : List[Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def a_ ( __snake_case : int ) -> int:
"""simple docstring"""
lowerCamelCase_ =0
while number:
# Speeds things up slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
a_ : list[bool | None] = [None] * 10_00_00_00
a_ : List[Any] = True
a_ : Optional[Any] = False
def a_ ( __snake_case : int ) -> bool:
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowerCamelCase_ =chain(next_number(__snake_case ) )
lowerCamelCase_ =number_chain
while number < 1000_0000:
lowerCamelCase_ =number_chain
number *= 10
return number_chain
def a_ ( __snake_case : int = 1000_0000 ) -> int:
"""simple docstring"""
for i in range(1 , __snake_case ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
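# Worked example, independent of the cached solver above: every chain ends
# at 1 or 89, e.g. 44 -> 32 -> 13 -> 10 -> 1 and 85 -> 89.
def _next(number: int) -> int:
    return sum(int(digit) ** 2 for digit in str(number))

value = 44
while value not in (1, 89):
    value = _next(value)
assert value == 1

value = 85
while value not in (1, 89):
    value = _next(value)
assert value == 89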
| 6 | 0 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class __UpperCamelCase ( pl.LightningModule ):
def __init__( self, lowerCAmelCase ):
"""simple docstring"""
super().__init__()
lowerCamelCase_ =model
lowerCamelCase_ =2
lowerCamelCase_ =nn.Linear(self.model.config.hidden_size, self.num_labels )
def lowercase__ ( self ):
"""simple docstring"""
pass
def a_ ( __snake_case : List[Any] , __snake_case : Tuple , __snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =LongformerModel.from_pretrained(_UpperCAmelCase )
lowerCamelCase_ =LightningModel(_UpperCAmelCase )
lowerCamelCase_ =torch.load(_UpperCAmelCase , map_location=torch.device('''cpu''' ) )
lightning_model.load_state_dict(ckpt['''state_dict'''] )
# init longformer question answering model
lowerCamelCase_ =LongformerForQuestionAnswering.from_pretrained(_UpperCAmelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_UpperCAmelCase )
print(F'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
a_ : str = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
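# The heart of the conversion above is copying weights between modules with
# matching shapes via state dicts. Minimal sketch of that transfer:
import torch.nn as nn

_src = nn.Linear(4, 2)
_dst = nn.Linear(4, 2)
_dst.load_state_dict(_src.state_dict())  # _dst now holds _src's weights
assert (_dst.weight == _src.weight).all()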
| 358 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a_ ( __snake_case : Tuple ) -> str:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __UpperCamelCase ( lowerCamelCase__ ):
@staticmethod
def lowercase__ ( lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''', type=lowerCAmelCase, default=lowerCAmelCase, help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''', action='''store_true''', help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''', action='''store_true''', help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''', )
download_parser.add_argument('''model''', type=lowerCAmelCase, help='''Name of the model to download''' )
download_parser.set_defaults(func=lowerCAmelCase )
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =model
lowerCamelCase_ =cache
lowerCamelCase_ =force
lowerCamelCase_ =trust_remote_code
def lowercase__ ( self ):
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
| 6 | 0 |
'''simple docstring'''
import string
from math import logaa
def a_ ( __snake_case : str , __snake_case : str ) -> int:
"""simple docstring"""
lowerCamelCase_ =document.translate(
str.maketrans('''''' , '''''' , string.punctuation ) ).replace('''\n''' , '''''' )
lowerCamelCase_ =document_without_punctuation.split(''' ''' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def a_ ( __snake_case : str , __snake_case : str ) -> tuple[int, int]:
"""simple docstring"""
lowerCamelCase_ =corpus.lower().translate(
str.maketrans('''''' , '''''' , string.punctuation ) ) # strip all punctuation and replace it with ''
lowerCamelCase_ =corpus_without_punctuation.split('''\n''' )
lowerCamelCase_ =term.lower()
return (len([doc for doc in docs if term in doc] ), len(docs ))
def a_ ( __snake_case : int , __snake_case : int , __snake_case : Dict=False ) -> float:
"""simple docstring"""
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('''df must be > 0''' )
elif n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(logaa(n / df ) , 3 )
def a_ ( __snake_case : int , __snake_case : int ) -> float:
"""simple docstring"""
return round(tf * idf , 3 )
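# Worked numbers for the helpers above: a 3-document corpus where the term
# appears in exactly 1 document (n = 3, df = 1) and occurs twice in the
# query document (tf = 2):
from math import log10

assert round(log10(3 / 1), 3) == 0.477            # idf
assert round(2 * 0.477, 3) == 0.954               # tf-idf
assert round(1 + log10(3 / (1 + 1)), 3) == 1.176  # smoothed idf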
| 359 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
a_ : List[str] = logging.get_logger(__name__)
a_ : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
a_ : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowerCamelCase__ )} )
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowercase : int =field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase : int =field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowercase : int =field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowercase : int =field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowercase : float =field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowercase : int =field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowercase : int =field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowercase : int =field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[Any] ='train'
lowercase : Any ='dev'
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : SquadDataTrainingArguments
lowercase : List[SquadFeatures]
lowercase : Split
lowercase : bool
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = Split.train, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = "pt", ):
"""simple docstring"""
lowerCamelCase_ =args
lowerCamelCase_ =is_language_sensitive
lowerCamelCase_ =SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowerCAmelCase, lowerCAmelCase ):
try:
lowerCamelCase_ =Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
lowerCamelCase_ =mode
# Load data features from cache or dataset file
lowerCamelCase_ ='''v2''' if args.version_2_with_negative else '''v1'''
lowerCamelCase_ =os.path.join(
cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ =cached_features_file + '''.lock'''
with FileLock(lowerCAmelCase ):
if os.path.exists(lowerCAmelCase ) and not args.overwrite_cache:
lowerCamelCase_ =time.time()
lowerCamelCase_ =torch.load(lowerCAmelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ =self.old_features['''features''']
lowerCamelCase_ =self.old_features.get('''dataset''', lowerCAmelCase )
lowerCamelCase_ =self.old_features.get('''examples''', lowerCAmelCase )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
''' a future run''' )
else:
if mode == Split.dev:
lowerCamelCase_ =self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ =self.processor.get_train_examples(args.data_dir )
lowerCamelCase_, lowerCamelCase_ =squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowerCAmelCase, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowerCAmelCase, )
lowerCamelCase_ =time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples}, lowerCAmelCase, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.features[i]
lowerCamelCase_ =torch.tensor(feature.input_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.attention_mask, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.token_type_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.cls_index, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.p_mask, dtype=torch.float )
lowerCamelCase_ =torch.tensor(feature.is_impossible, dtype=torch.float )
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ =torch.tensor(feature.start_position, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
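# The caching above uses a file lock so that, under distributed training,
# one process builds the features while the rest block and then read the
# cache. Minimal sketch of the pattern (cache_path and build_fn are ours):
import os
import torch
from filelock import FileLock

def _load_or_build(cache_path, build_fn):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path):
            return torch.load(cache_path)
        data = build_fn()  # only the first process to get the lock builds
        torch.save(data, cache_path)
        return data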
| 6 | 0 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
a_ : str = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def a_ ( __snake_case : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ ={}
state_dict.pop('''pixel_mean''' , __snake_case )
state_dict.pop('''pixel_std''' , __snake_case )
lowerCamelCase_ =r'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'''
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowerCamelCase_ =key.replace(__snake_case , __snake_case )
if re.match(__snake_case , __snake_case ):
lowerCamelCase_ =int(re.match(__snake_case , __snake_case ).group(2 ) )
if layer_nb == 0:
lowerCamelCase_ =key.replace('''layers.0''' , '''proj_in''' )
elif layer_nb == 1:
lowerCamelCase_ =key.replace('''layers.1''' , '''layers.0''' )
elif layer_nb == 2:
lowerCamelCase_ =key.replace('''layers.2''' , '''proj_out''' )
lowerCamelCase_ =value
lowerCamelCase_ =model_state_dict[
'''prompt_encoder.shared_embedding.positional_embedding'''
]
return model_state_dict
def a_ ( __snake_case : Any , __snake_case : List[str] , __snake_case : List[str] , __snake_case : int="ybelkada/segment-anything" ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =hf_hub_download(__snake_case , F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
lowerCamelCase_ =SamConfig()
elif "sam_vit_l" in model_name:
lowerCamelCase_ =SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
lowerCamelCase_ =SamConfig(
vision_config=__snake_case , )
elif "sam_vit_h" in model_name:
lowerCamelCase_ =SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
lowerCamelCase_ =SamConfig(
vision_config=__snake_case , )
lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' )
lowerCamelCase_ =replace_keys(__snake_case )
lowerCamelCase_ =SamImageProcessor()
lowerCamelCase_ =SamProcessor(image_processor=__snake_case )
lowerCamelCase_ =SamModel(__snake_case )
hf_model.load_state_dict(__snake_case )
lowerCamelCase_ =hf_model.to('''cuda''' )
lowerCamelCase_ ='''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'''
lowerCamelCase_ =Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert('''RGB''' )
lowerCamelCase_ =[[[400, 650]]]
lowerCamelCase_ =[[1]]
lowerCamelCase_ =processor(images=np.array(__snake_case ) , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
lowerCamelCase_ =hf_model(**__snake_case )
lowerCamelCase_ =output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8
lowerCamelCase_ =processor(
images=np.array(__snake_case ) , input_points=__snake_case , input_labels=__snake_case , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
lowerCamelCase_ =hf_model(**__snake_case )
lowerCamelCase_ =output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4
lowerCamelCase_ =((75, 275, 1725, 850),)
lowerCamelCase_ =processor(images=np.array(__snake_case ) , input_boxes=__snake_case , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
lowerCamelCase_ =hf_model(**__snake_case )
lowerCamelCase_ =output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4
# Test with 2 points and 1 image.
lowerCamelCase_ =[[[400, 650], [800, 650]]]
lowerCamelCase_ =[[1, 1]]
lowerCamelCase_ =processor(
images=np.array(__snake_case ) , input_points=__snake_case , input_labels=__snake_case , return_tensors='''pt''' ).to('''cuda''' )
with torch.no_grad():
lowerCamelCase_ =hf_model(**__snake_case )
lowerCamelCase_ =output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
a_ : Any = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
a_ : List[Any] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 360 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Any = logging.get_logger(__name__)
a_ : Optional[int] = """https://openaipublic.azureedge.net/jukebox/models/"""
a_ : Any = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def a_ ( __snake_case : int ) -> Any:
"""simple docstring"""
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
lowerCamelCase_ =key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
lowerCamelCase_ =key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowerCamelCase_ =key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
lowerCamelCase_ =key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def a_ ( __snake_case : Dict , __snake_case : int , __snake_case : Dict , __snake_case : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ ={}
import re
lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_conv_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_encoder_block_conv_in.sub(__snake_case , __snake_case )
elif re_encoder_block_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_encoder_block_resnet.sub(__snake_case , __snake_case )
elif re_encoder_block_proj_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_proj_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
lowerCamelCase_ =re_encoder_block_proj_out.sub(__snake_case , __snake_case )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_conv_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_decoder_block_conv_out.sub(__snake_case , __snake_case )
elif re_decoder_block_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_decoder_block_resnet.sub(__snake_case , __snake_case )
elif re_decoder_block_proj_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_proj_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
lowerCamelCase_ =re_decoder_block_proj_in.sub(__snake_case , __snake_case )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_conv_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_prior_cond_conv_out.sub(__snake_case , __snake_case )
elif re_prior_cond_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_prior_cond_resnet.sub(__snake_case , __snake_case )
elif re_prior_cond_proj_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_proj_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
lowerCamelCase_ =re_prior_cond_proj_in.sub(__snake_case , __snake_case )
# keep original key
else:
lowerCamelCase_ =original_key
lowerCamelCase_ =replace_key(__snake_case )
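        # Three outcomes follow: a key absent from the converted model is reported;
        # a shape mismatch is reported and the original key is restored; every
        # entry is then recorded in the name mapping and the converted dict.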
if F'''{key_prefix}.{key}''' not in model_state_dict or key is None:
print(F'''failed converting {original_key} to {key}, does not match''' )
        # handle mismatched shapes
        elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape:
            lowerCamelCase_ =model_state_dict[F'''{key_prefix}.{key}''']
            print(F'''{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match''' )
            lowerCamelCase_ =original_key
        mapping[key] =original_key
        new_dict[key] =value
return new_dict
@torch.no_grad()
def a_ ( __snake_case : List[str]=None , __snake_case : Tuple=None ) -> Union[str, Any]:
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ):
lowerCamelCase_ =requests.get(F'''{PREFIX}{file}''' , allow_redirects=__snake_case )
os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=__snake_case )
open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , '''wb''' ).write(r.content )
lowerCamelCase_ =MODEL_MAPPING[model_name.split('''/''' )[-1]]
lowerCamelCase_ =JukeboxConfig.from_pretrained(__snake_case )
lowerCamelCase_ =JukeboxModel(__snake_case )
lowerCamelCase_ =[]
lowerCamelCase_ ={}
for i, dict_name in enumerate(__snake_case ):
lowerCamelCase_ =torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['''model''']
        lowerCamelCase_ ={}
        for k in old_dic.keys():
            if k.endswith('''.b''' ):
                new_dic[k.replace('''b''', '''bias''' )] =old_dic[k]
            elif k.endswith('''.w''' ):
                new_dic[k.replace('''w''', '''weight''' )] =old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('''.blocks.''', '''.model.''' )] =old_dic[k]
            else:
                new_dic[k] =old_dic[k]
lowerCamelCase_ ='''vqvae''' if i == 0 else F'''priors.{3 - i}'''
        lowerCamelCase_ =fix_jukebox_keys(new_dic , model.state_dict() , __snake_case , __snake_case )
weight_dict.append(__snake_case )
lowerCamelCase_ =weight_dict.pop(0 )
model.vqvae.load_state_dict(__snake_case )
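    # weight_dict now holds one converted state dict per prior, appended in
    # reverse order of model.priors, so weight_dict[2 - i] matches prior i below.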
for i in range(len(__snake_case ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile:
json.dump(__snake_case , __snake_case )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
return weight_dict
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
a_ : Optional[int] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 6 | 0 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Tuple = logging.get_logger(__name__)
def a_ ( __snake_case : Any ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
lowerCamelCase_ =MaskFormerConfig(backbone_config=__lowerCAmelCase )
lowerCamelCase_ ="""huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
lowerCamelCase_ =847
lowerCamelCase_ ="""maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
lowerCamelCase_ =150
lowerCamelCase_ ="""ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
lowerCamelCase_ =171
lowerCamelCase_ ="""maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
lowerCamelCase_ =133
lowerCamelCase_ ="""coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
lowerCamelCase_ =19
lowerCamelCase_ ="""cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
lowerCamelCase_ =65
lowerCamelCase_ ="""mapillary-vistas-id2label.json"""
lowerCamelCase_ =json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase_ ={int(__lowerCAmelCase ): v for k, v in idalabel.items()}
return config
def a_ ( __snake_case : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =[]
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
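    # The original FPN numbers its adapters/layers 3..1 while the HF decoder
    # numbers them 0..2, hence the reversed zip: (3, 0), (2, 1), (1, 2).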
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') )
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def a_ ( __snake_case : Tuple , __snake_case : Tuple , __snake_case : Tuple ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =dct.pop(__lowerCAmelCase )
lowerCamelCase_ =val
def a_ ( __snake_case : List[str] , __snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =[int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
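    # Swin doubles the channel count at every stage; e.g. with embed_dim = 96
    # and four stages this gives [96, 192, 384, 768].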
for i in range(len(backbone_config.depths ) ):
lowerCamelCase_ =num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowerCamelCase_ =state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
lowerCamelCase_ =state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ =in_proj_weight[:dim, :]
lowerCamelCase_ =in_proj_bias[: dim]
lowerCamelCase_ =in_proj_weight[
dim : dim * 2, :
]
lowerCamelCase_ =in_proj_bias[
dim : dim * 2
]
lowerCamelCase_ =in_proj_weight[
-dim :, :
]
lowerCamelCase_ =in_proj_bias[-dim :]
# fmt: on
def a_ ( __snake_case : str , __snake_case : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
# fmt: off
lowerCamelCase_ =config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
lowerCamelCase_ =state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
lowerCamelCase_ =state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ =in_proj_weight[: hidden_size, :]
lowerCamelCase_ =in_proj_bias[:config.hidden_size]
lowerCamelCase_ =in_proj_weight[hidden_size : hidden_size * 2, :]
lowerCamelCase_ =in_proj_bias[hidden_size : hidden_size * 2]
lowerCamelCase_ =in_proj_weight[-hidden_size :, :]
lowerCamelCase_ =in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
lowerCamelCase_ =state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
lowerCamelCase_ =state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ =in_proj_weight[: hidden_size, :]
lowerCamelCase_ =in_proj_bias[:config.hidden_size]
lowerCamelCase_ =in_proj_weight[hidden_size : hidden_size * 2, :]
lowerCamelCase_ =in_proj_bias[hidden_size : hidden_size * 2]
lowerCamelCase_ =in_proj_weight[-hidden_size :, :]
lowerCamelCase_ =in_proj_bias[-hidden_size :]
# fmt: on
def a_ ( ) -> torch.Tensor:
"""simple docstring"""
lowerCamelCase_ ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase_ =Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def a_ ( __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : List[Any] = False ) -> str:
"""simple docstring"""
lowerCamelCase_ =get_maskformer_config(__lowerCAmelCase )
# load original state_dict
with open(__lowerCAmelCase , '''rb''' ) as f:
lowerCamelCase_ =pickle.load(__lowerCAmelCase )
lowerCamelCase_ =data["""model"""]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
lowerCamelCase_ =create_rename_keys(__lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_swin_q_k_v(__lowerCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
lowerCamelCase_ =torch.from_numpy(__lowerCAmelCase )
# load 🤗 model
lowerCamelCase_ =MaskFormerForInstanceSegmentation(__lowerCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(__lowerCAmelCase , param.shape )
    lowerCamelCase_, lowerCamelCase_ =model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(__lowerCAmelCase ) == 0, F'''Unexpected keys: {unexpected_keys}'''
# verify results
lowerCamelCase_ =prepare_img()
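    # The ignore index chosen below is presumably each dataset's "void"/unlabeled
    # id (e.g. 255 for ADE20K-style segmentation maps).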
if "vistas" in model_name:
lowerCamelCase_ =65
elif "cityscapes" in model_name:
lowerCamelCase_ =6_5535
else:
lowerCamelCase_ =255
lowerCamelCase_ =True if """ade""" in model_name else False
lowerCamelCase_ =MaskFormerImageProcessor(ignore_index=__lowerCAmelCase , reduce_labels=__lowerCAmelCase )
lowerCamelCase_ =image_processor(__lowerCAmelCase , return_tensors='''pt''' )
lowerCamelCase_ =model(**__lowerCAmelCase )
print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
lowerCamelCase_ =torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F'''nielsr/{model_name}''' )
image_processor.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
a_ : Dict = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 361 |
'''simple docstring'''
def a_ ( __snake_case : int = 1000 ) -> int:
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =1, 1
lowerCamelCase_ =2
while True:
lowerCamelCase_ =0
lowerCamelCase_ =fa + fa
lowerCamelCase_, lowerCamelCase_ =fa, f
index += 1
        for _ in str(f ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 6 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase ( _A , unittest.TestCase ):
lowercase : Union[str, Any] =LayoutLMTokenizer
lowercase : Tuple =LayoutLMTokenizerFast
lowercase : List[str] =True
lowercase : Optional[int] =True
def lowercase__ ( self ):
"""simple docstring"""
super().setUp()
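        # A tiny WordPiece-style vocabulary written to disk so both the slow and
        # fast tokenizers can be instantiated without downloading anything.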
lowerCamelCase_ =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ =os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file, '''w''', encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self, **lowerCAmelCase ):
"""simple docstring"""
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ ='''UNwant\u00E9d,running'''
lowerCamelCase_ ='''unwanted, running'''
return input_text, output_text
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.tokenizer_class(self.vocab_file )
lowerCamelCase_ =tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__SCREAMING_SNAKE_CASE, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ), [7, 4, 5, 10, 8, 9] )
def lowercase__ ( self ):
"""simple docstring"""
pass
| 362 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def a_ ( __snake_case : Any ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
F'''{test_file} instead.''' )
lowerCamelCase_ =components[-1]
if not test_fn.endswith('''py''' ):
raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith('''test_modeling_''' ):
raise ValueError(
F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
lowerCamelCase_ =components[:-1] + [test_fn.replace('''.py''' , '''''' )]
lowerCamelCase_ ='''.'''.join(__snake_case )
return test_module_path
def a_ ( __snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =get_module_path(__snake_case )
lowerCamelCase_ =importlib.import_module(__snake_case )
return test_module
def a_ ( __snake_case : Dict ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =get_test_module(__snake_case )
for attr in dir(__snake_case ):
if attr.endswith('''ModelTester''' ):
tester_classes.append(getattr(__snake_case , __snake_case ) )
# sort with class names
    return sorted(__snake_case , key=lambda x: x.__name__ )
def a_ ( __snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =get_test_module(__snake_case )
for attr in dir(__snake_case ):
lowerCamelCase_ =getattr(__snake_case , __snake_case )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
lowerCamelCase_ =getattr(__snake_case , '''all_model_classes''' , [] )
if len(__snake_case ) > 0:
test_classes.append(__snake_case )
# sort with class names
    return sorted(__snake_case , key=lambda x: x.__name__ )
def a_ ( __snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =get_test_classes(__snake_case )
lowerCamelCase_ =set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
    return sorted(__snake_case , key=lambda x: x.__name__ )
def a_ ( __snake_case : str ) -> str:
"""simple docstring"""
lowerCamelCase_ =test_class()
if hasattr(__snake_case , '''setUp''' ):
test.setUp()
lowerCamelCase_ =None
if hasattr(__snake_case , '''model_tester''' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
lowerCamelCase_ =test.model_tester.__class__
return model_tester
def a_ ( __snake_case : Dict , __snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =get_test_classes(__snake_case )
lowerCamelCase_ =[]
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(__snake_case )
# sort with class names
    return sorted(__snake_case , key=lambda x: x.__name__ )
def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ =get_test_classes_for_model(__snake_case , __snake_case )
lowerCamelCase_ =[]
for test_class in test_classes:
lowerCamelCase_ =get_model_tester_from_test_class(__snake_case )
if tester_class is not None:
tester_classes.append(__snake_case )
# sort with class names
    return sorted(__snake_case , key=lambda x: x.__name__ )
def a_ ( __snake_case : Tuple ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =get_test_classes(__snake_case )
lowerCamelCase_ ={test_class: get_model_tester_from_test_class(__snake_case ) for test_class in test_classes}
return test_tester_mapping
def a_ ( __snake_case : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =get_model_classes(__snake_case )
lowerCamelCase_ ={
model_class: get_test_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes
}
return model_test_mapping
def a_ ( __snake_case : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =get_model_classes(__snake_case )
lowerCamelCase_ ={
model_class: get_tester_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes
}
return model_to_tester_mapping
def a_ ( __snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
if isinstance(__snake_case , __snake_case ):
return o
elif isinstance(__snake_case , __snake_case ):
return o.__name__
elif isinstance(__snake_case , (list, tuple) ):
return [to_json(__snake_case ) for x in o]
elif isinstance(__snake_case , __snake_case ):
return {to_json(__snake_case ): to_json(__snake_case ) for k, v in o.items()}
else:
return o
| 6 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : List[Any] = logging.get_logger(__name__)
a_ : List[str] = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class __UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
lowercase : Tuple ='data2vec-text'
def __init__( self, lowerCAmelCase=30_522, lowerCAmelCase=768, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3_072, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-12, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase="absolute", lowerCAmelCase=True, lowerCAmelCase=None, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE, bos_token_id=_SCREAMING_SNAKE_CASE, eos_token_id=_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_act
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =position_embedding_type
lowerCamelCase_ =use_cache
lowerCamelCase_ =classifier_dropout
class __UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
@property
def lowercase__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCamelCase_ ={0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase_ ={0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 363 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : str =['speech']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Any =['speech']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
| 6 | 0 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
a_ : Any = None
a_ : int = """<""" if sys.byteorder == """little""" else """>"""
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
a_ : Dict = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class __UpperCamelCase :
lowercase : bool =True
lowercase : Optional[str] =None
# Automatically constructed
lowercase : ClassVar[str] ="PIL.Image.Image"
lowercase : ClassVar[Any] =pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowercase : str =field(default='Image' , init=__snake_case , repr=__snake_case )
def __call__( self ):
"""simple docstring"""
return self.pa_type
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase_ =np.array(lowerCamelCase_ )
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
return {"path": value, "bytes": None}
elif isinstance(lowerCamelCase_, lowerCamelCase_ ):
return {"path": None, "bytes": value}
elif isinstance(lowerCamelCase_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(lowerCamelCase_ )
elif isinstance(lowerCamelCase_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(lowerCamelCase_ )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
lowerCamelCase_ ={}
lowerCamelCase_ =value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(lowerCamelCase_ ):
lowerCamelCase_ =PIL.Image.open(lowerCamelCase_ )
else:
lowerCamelCase_ =path.split('''::''' )[-1]
try:
lowerCamelCase_ =string_to_dict(lowerCamelCase_, config.HUB_DATASETS_URL )["""repo_id"""]
lowerCamelCase_ =token_per_repo_id.get(lowerCamelCase_ )
except ValueError:
lowerCamelCase_ =None
with xopen(lowerCamelCase_, '''rb''', use_auth_token=lowerCamelCase_ ) as f:
lowerCamelCase_ =BytesIO(f.read() )
lowerCamelCase_ =PIL.Image.open(bytes_ )
else:
lowerCamelCase_ =PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase__ ( self ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
lowerCamelCase_ =pa.array([None] * len(lowerCamelCase_ ), type=pa.binary() )
lowerCamelCase_ =pa.StructArray.from_arrays([bytes_array, storage], ['''bytes''', '''path'''], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCamelCase_ =pa.array([None] * len(lowerCamelCase_ ), type=pa.string() )
lowerCamelCase_ =pa.StructArray.from_arrays([storage, path_array], ['''bytes''', '''path'''], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
lowerCamelCase_ =storage.field('''bytes''' )
else:
lowerCamelCase_ =pa.array([None] * len(lowerCamelCase_ ), type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
lowerCamelCase_ =storage.field('''path''' )
else:
lowerCamelCase_ =pa.array([None] * len(lowerCamelCase_ ), type=pa.string() )
lowerCamelCase_ =pa.StructArray.from_arrays([bytes_array, path_array], ['''bytes''', '''path'''], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
lowerCamelCase_ =pa.array(
[encode_np_array(np.array(lowerCamelCase_ ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
lowerCamelCase_ =pa.array([None] * len(lowerCamelCase_ ), type=pa.string() )
lowerCamelCase_ =pa.StructArray.from_arrays(
[bytes_array, path_array], ['''bytes''', '''path'''], mask=bytes_array.is_null() )
return array_cast(lowerCamelCase_, self.pa_type )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(lowerCAmelCase ):
with xopen(lowerCamelCase_, '''rb''' ) as f:
lowerCamelCase_ =f.read()
return bytes_
lowerCamelCase_ =pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
lowerCamelCase_ =pa.array(
[os.path.basename(lowerCamelCase_ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()], type=pa.string(), )
lowerCamelCase_ =pa.StructArray.from_arrays([bytes_array, path_array], ['''bytes''', '''path'''], mask=bytes_array.is_null() )
return array_cast(lowerCamelCase_, self.pa_type )
def a_ ( ) -> List[str]:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
lowerCamelCase_ =list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def a_ ( __snake_case : "PIL.Image.Image" ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =BytesIO()
if image.format in list_image_compression_formats():
lowerCamelCase_ =image.format
else:
lowerCamelCase_ ="""PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(_a , format=_a )
return buffer.getvalue()
def a_ ( __snake_case : "PIL.Image.Image" ) -> Tuple:
"""simple docstring"""
if hasattr(_a , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_a )}
def a_ ( __snake_case : np.ndarray ) -> Tuple:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
lowerCamelCase_ =array.dtype
lowerCamelCase_ =dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
lowerCamelCase_ =dtype.kind
lowerCamelCase_ =dtype.itemsize
lowerCamelCase_ =None
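    # e.g. a "<i8" array is not a valid image dtype, so the search further below
    # halves the itemsize until it reaches "<i4", which Pillow accepts.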
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
lowerCamelCase_ =np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
lowerCamelCase_ =dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
lowerCamelCase_ =dtype_byteorder + dtype_kind + str(_a )
lowerCamelCase_ =np.dtype(_a )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
lowerCamelCase_ =PIL.Image.fromarray(array.astype(_a ) )
return {"path": None, "bytes": image_to_bytes(_a )}
def a_ ( __snake_case : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> Union[str, Any]:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
lowerCamelCase_ =first_non_null_value(_a )
if isinstance(_a , _a ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_a , np.ndarray ):
lowerCamelCase_ =no_op_if_value_is_null(_a )
return [obj_to_image_dict_func(_a ) for obj in objs]
elif isinstance(_a , PIL.Image.Image ):
lowerCamelCase_ =no_op_if_value_is_null(_a )
return [obj_to_image_dict_func(_a ) for obj in objs]
else:
return objs
else:
return objs
| 364 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[str] =['image_processor', 'tokenizer']
lowercase : Optional[int] ='AutoImageProcessor'
lowercase : List[str] ='AutoTokenizer'
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =self.image_processor
lowerCamelCase_ =False
def __call__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''images''', lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''text''', lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
lowerCamelCase_ =args[0]
lowerCamelCase_ =args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
lowerCamelCase_ =self.image_processor(lowerCAmelCase, *lowerCAmelCase, **lowerCAmelCase )
if text is not None:
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, **lowerCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCamelCase_ =encodings['''input_ids''']
return inputs
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@contextmanager
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
lowerCamelCase_ =True
lowerCamelCase_ =self.tokenizer
yield
lowerCamelCase_ =self.image_processor
lowerCamelCase_ =False
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False, lowerCAmelCase=None ):
"""simple docstring"""
if added_vocab is None:
lowerCamelCase_ =self.tokenizer.get_added_vocab()
lowerCamelCase_ ={}
while tokens:
lowerCamelCase_ =re.search(R'''<s_(.*?)>''', lowerCAmelCase, re.IGNORECASE )
if start_token is None:
break
lowerCamelCase_ =start_token.group(1 )
lowerCamelCase_ =re.search(Rf'''</s_{key}>''', lowerCAmelCase, re.IGNORECASE )
lowerCamelCase_ =start_token.group()
if end_token is None:
lowerCamelCase_ =tokens.replace(lowerCAmelCase, '''''' )
else:
lowerCamelCase_ =end_token.group()
lowerCamelCase_ =re.escape(lowerCAmelCase )
lowerCamelCase_ =re.escape(lowerCAmelCase )
lowerCamelCase_ =re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''', lowerCAmelCase, re.IGNORECASE )
if content is not None:
lowerCamelCase_ =content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowerCamelCase_ =self.tokenajson(lowerCAmelCase, is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase )
if value:
if len(lowerCAmelCase ) == 1:
lowerCamelCase_ =value[0]
lowerCamelCase_ =value
else: # leaf nodes
lowerCamelCase_ =[]
for leaf in content.split(R'''<sep/>''' ):
lowerCamelCase_ =leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowerCamelCase_ =leaf[1:-2] # for categorical special tokens
output[key].append(lowerCAmelCase )
if len(output[key] ) == 1:
lowerCamelCase_ =output[key][0]
lowerCamelCase_ =tokens[tokens.find(lowerCAmelCase ) + len(lowerCAmelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:], is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase )
if len(lowerCAmelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
| 6 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def a_ ( __snake_case : Tuple="" ) -> str:
"""simple docstring"""
lowerCamelCase_ =tempfile.mkdtemp()
    return os.path.join(SCREAMING_SNAKE_CASE_ , str(uuid.uuid4() ) + suffix )
@require_soundfile
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =torch.rand(12, dtype=torch.floataa ) - 0.5
lowerCamelCase_ =AgentAudio(_lowercase )
lowerCamelCase_ =str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_lowercase, agent_type.to_raw(), atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(_lowercase ) )
# Ensure that the file contains the same value as the original tensor
lowerCamelCase_, lowerCamelCase_ =sf.read(_lowercase )
self.assertTrue(torch.allclose(_lowercase, torch.tensor(_lowercase ), atol=1e-4 ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =torch.rand(12, dtype=torch.floataa ) - 0.5
lowerCamelCase_ =get_new_path(suffix='''.wav''' )
sf.write(_lowercase, _lowercase, 16_000 )
lowerCamelCase_ =AgentAudio(_lowercase )
self.assertTrue(torch.allclose(_lowercase, agent_type.to_raw(), atol=1e-4 ) )
self.assertEqual(agent_type.to_string(), _lowercase )
@require_vision
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =torch.randint(0, 256, (64, 64, 3) )
lowerCamelCase_ =AgentImage(_lowercase )
lowerCamelCase_ =str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_lowercase, agent_type._tensor, atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw(), Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_lowercase ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
lowerCamelCase_ =Image.open(_lowercase )
lowerCamelCase_ =AgentImage(_lowercase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_lowercase ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
lowerCamelCase_ =Image.open(_lowercase )
lowerCamelCase_ =AgentImage(_lowercase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_lowercase ) )
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''Hey!'''
lowerCamelCase_ =AgentText(_lowercase )
self.assertEqual(_lowercase, agent_type.to_string() )
self.assertEqual(_lowercase, agent_type.to_raw() )
self.assertEqual(_lowercase, _lowercase )
| 365 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =ShapEImgaImgPipeline
lowercase : Dict =['image']
lowercase : str =['image']
lowercase : int =[
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowercase : int =False
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self ):
"""simple docstring"""
return 8
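    # The dimensions above are deliberately tiny so the dummy pipeline can be
    # built and run quickly in tests.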
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
lowerCamelCase_ =CLIPVisionModel(lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =CLIPImageProcessor(
crop_size=224, do_center_crop=lowerCAmelCase, do_normalize=lowerCAmelCase, do_resize=lowerCAmelCase, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=224, )
return image_processor
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowerCamelCase_ =PriorTransformer(**lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**lowerCamelCase_)  # renderer kwargs are assembled just above
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1_024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array([0.00039216] * 9)  # nine identical reference values
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 6 | 0 |
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
| 366 |
"""
Project Euler Problem 205: https://projecteuler.net/problem=205

Peter rolls nine four-sided (pyramidal) dice; Colin rolls six six-sided (cubic)
dice. Find the probability that Peter's total beats Colin's total, rounded to
seven decimal places.
"""
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count how often each possible total occurs for `dice_number` dice with
    `sides_number` faces each."""
    max_face_number = sides_number
    max_total_value = max_face_number * dice_number
    totals_frequencies = [0] * (max_total_value + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    return round(peter_win_probability, ndigits=7)


if __name__ == "__main__":
    print(f"{solution() = }")
| 6 | 0 |
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Save the model with the best metric score, as computed on the validation set."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
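

# A hedged wiring example: the Trainer construction below is standard
# pytorch_lightning API, but `max_epochs`, the metric name and the output
# directory are illustrative, and `trainer.fit(...)` would additionally need a
# LightningModule, which this module does not define.
if __name__ == "__main__":
    demo_checkpoint = get_checkpoint_callback(output_dir="outputs", metric="em")
    demo_early_stopping = get_early_stopping_callback(metric="em", patience=3)
    demo_trainer = pl.Trainer(
        max_epochs=10,
        callbacks=[Seq2SeqLoggingCallback(), demo_checkpoint, demo_early_stopping],
    )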
| 367 |
import os
from typing import Dict, List, Tuple, TypeVar, Union


T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
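
# A small illustration (not part of the original module) of how these aliases
# read in a signature: `NestedDataStructureLike[PathLike]` covers a single
# path, a list of paths, or a {split: path} mapping. `_demo_resolve` is a
# hypothetical helper added here only for demonstration.
def _demo_resolve(paths: NestedDataStructureLike[PathLike]) -> List[str]:
    if isinstance(paths, dict):
        return [str(p) for p in paths.values()]
    if isinstance(paths, (list, tuple)):
        return [str(p) for p in paths]
    return [str(paths)]


assert _demo_resolve({"train": "train.csv", "test": "test.csv"}) == ["train.csv", "test.csv"]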
| 6 | 0 |
import argparse
import copy


def generate_neighbours(path):
    """Build, for every node, the list of [neighbour, distance] pairs read
    from a whitespace-separated edge-list file."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting from the first character of the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """All tours obtained by swapping two inner nodes; each entry carries its
    total distance as the last element."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )

    # Pass the arguments to main method
    main(parser.parse_args())
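

# A hedged, importable demo: the graph below is made up, the file format is
# "node_a node_b distance" per line, and the first character of the file is
# read back as the start node (see generate_first_solution). Import this
# module and call _demo() to try it without the CLI entry point above.
def _demo() -> None:
    import tempfile

    edges = ["a b 20", "a c 18", "a d 22", "b c 10", "b d 11", "c d 12"]
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write("\n".join(edges) + "\n")
        path = tmp.name

    neighbours = generate_neighbours(path)
    first_solution, first_distance = generate_first_solution(path, neighbours)
    best_solution, best_cost = tabu_search(first_solution, first_distance, neighbours, 10, 3)
    print(f"Best tour: {best_solution}, distance: {best_cost}")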
| 368 |
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_max.score()}"
    )
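
# The escape rule above is the Metropolis criterion: a move that worsens the
# score by |change| is still accepted with probability e^(change / T), so the
# colder the temperature, the rarer the escapes. A small illustration with
# made-up numbers:
if __name__ == "__main__":
    for temperature in (100, 10, 1):
        acceptance = math.e ** (-5 / temperature)  # a candidate worse by 5
        print(f"T={temperature:>3}: accepted with probability {acceptance:.3f}")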
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = ["""LongformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_longformer"] = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_longformer"] = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
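
# Illustrative note (hedged): with the _LazyModule in place, importing this
# package stays cheap, and e.g.
#
#     from transformers.models.longformer import LongformerConfig
#
# triggers the real submodule import only on that first attribute access; the
# TYPE_CHECKING branch above exists purely so static analyzers see the same
# names.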
| 369 |
import argparse
import json
import os

import torch
from torch import nn

from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
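

# A hedged helper for inspecting the index written above; it is not part of
# the original conversion script, and `folder` must point at the dump
# directory produced by shard_on_the_fly. The index maps each parameter name
# to the shard file that stores it.
def inspect_index(folder: str) -> None:
    with open(os.path.join(folder, WEIGHTS_INDEX_NAME)) as f:
        index = json.load(f)
    print("total_size:", index["metadata"]["total_size"])
    shards = set(index["weight_map"].values())
    print(f"{len(index['weight_map'])} tensors across {len(shards)} shard files")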
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_table_transformer"] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370 |
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of overflow, map each `input_ids` sample back to its image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
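

# A hedged usage sketch (the checkpoint id exists on the Hub, but running this
# downloads weights and, with the image processor's default `apply_ocr=True`,
# also requires Tesseract to be installed; the image file is illustrative):
#
#     from transformers import LayoutXLMProcessor
#     from PIL import Image
#
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     image = Image.open("document.png").convert("RGB")
#     encoding = processor(image, return_tensors="pt")
#     print(encoding.keys())  # input_ids, bbox, attention_mask, image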
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 371 |
import unittest

import torch

from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
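

# A hedged encode/decode round trip with the same dummy checkpoint the tests
# use (running it downloads the weights):
#
#     model = VQModel.from_pretrained("fusing/vqgan-dummy")
#     sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
#     with torch.no_grad():
#         latents = model.encode(sample).latents
#         reconstruction = model.decode(latents).sample
#     print(reconstruction.shape)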
| 6 | 0 |
def hex_to_bin(hex_num: str) -> int:
    """Convert a hexadecimal value to its binary equivalent, returned as an
    int whose decimal digits are the binary digits."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
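
# Illustrative checks (added here as a module-level smoke test; the values
# follow directly from standard hex-to-binary conversion):
assert hex_to_bin("AC") == 10101100
assert hex_to_bin("9A4") == 100110100100
assert hex_to_bin("   12f   ") == 100101111
assert hex_to_bin("-fFfF") == -1111111111111111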
| 350 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
a_ : List[Any] = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
a_ : List[Any] = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
a_ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 6 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    add_noise_common,
    get_velocity_common,
)


@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps
        )

    def scale_model_input(self, state, sample, timestep=None):
        return sample

    def set_timesteps(self, state, num_inference_steps, shape=()):
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def _get_variance(self, state, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key=None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state, sample, noise, timesteps):
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
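

# A hedged end-to-end sketch of driving the scheduler; the "model" here is a
# stand-in identity function rather than a trained denoiser, and the sample
# shape is made up:
#
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1_000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=10)
#     key = jax.random.PRNGKey(0)
#     sample = jax.random.normal(key, (1, 3, 8, 8)) * state.init_noise_sigma
#     for t in state.timesteps:
#         model_output = sample  # placeholder for a denoising model's prediction
#         key, step_key = jax.random.split(key)
#         sample, state = scheduler.step(state, model_output, t, sample, key=step_key, return_dict=False)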
| 351 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
a_ : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_12,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_12,
"""facebook/dpr-question_encoder-multiset-base""": 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 5_12,
"""facebook/dpr-reader-multiset-base""": 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(lowerCamelCase__ )
class __UpperCamelCase :
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
elif titles is None or texts is None:
lowerCamelCase_ =titles if texts is None else texts
return super().__call__(
lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
lowerCamelCase_ =titles if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [titles]
lowerCamelCase_ =texts if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [texts]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =questions if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [questions] * n_passages
assert len(lowerCAmelCase ) == len(
lowerCAmelCase ), f'''There should be as many titles as texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.'''
lowerCamelCase_ =super().__call__(lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ =super().__call__(lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ ={
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase, lowerCAmelCase )
]
}
if return_attention_mask is not False:
lowerCamelCase_ =[]
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCamelCase_ =attention_mask
return self.pad(lowerCAmelCase, padding=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 16, lowerCAmelCase = 64, lowerCAmelCase = 4, ):
"""simple docstring"""
lowerCamelCase_ =reader_input['''input_ids''']
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =reader_output[:3]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =sorted(range(lowerCAmelCase ), reverse=lowerCAmelCase, key=relevance_logits.__getitem__ )
lowerCamelCase_ =[]
for doc_id in sorted_docs:
lowerCamelCase_ =list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCamelCase_ =sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCamelCase_ =sequence_ids.index(self.pad_token_id )
else:
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCAmelCase, top_spans=lowerCAmelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCAmelCase, start_index=lowerCAmelCase, end_index=lowerCAmelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
if len(lowerCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =[]
for start_index, start_score in enumerate(lowerCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCamelCase_ =sorted(lowerCAmelCase, key=lambda lowerCAmelCase : x[1], reverse=lowerCAmelCase )
lowerCamelCase_ =[]
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
lowerCamelCase_ =end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
lowercase : int =VOCAB_FILES_NAMES
lowercase : Tuple =READER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =READER_PRETRAINED_INIT_CONFIGURATION
lowercase : int =['input_ids', 'attention_mask']
lowercase : Dict =DPRReaderTokenizer
| 6 | 0 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_has_cookiecutter = True
except ImportError:
_has_cookiecutter = False
a_ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def a_ ( __snake_case : Namespace ) -> Dict:
"""simple docstring"""
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class __UpperCamelCase ( BaseTransformersCLICommand ):
@staticmethod
def lowercase__ ( lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =parser.add_parser('''add-new-model''' )
add_new_model_parser.add_argument('''--testing''', action='''store_true''', help='''If in testing mode.''' )
add_new_model_parser.add_argument('''--testing_file''', type=lowerCAmelCase, help='''Configuration file on which to run.''' )
add_new_model_parser.add_argument(
'''--path''', type=lowerCAmelCase, help='''Path to cookiecutter. Should only be used for testing purposes.''' )
add_new_model_parser.set_defaults(func=lowerCAmelCase )
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None, *lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =testing
lowerCamelCase_ =testing_file
lowerCamelCase_ =path
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '''
'''It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '''
'''checks, you should use `transformers-cli add-new-model-like` instead.''' )
if not _has_cookiecutter:
raise ImportError(
'''Model creation dependencies are required to use the `add_new_model` command. Install them by running '''
'''the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n''' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
lowerCamelCase_ =[directory for directory in os.listdir() if '''cookiecutter-template-''' == directory[:22]]
if len(lowerCAmelCase ) > 0:
raise ValueError(
'''Several directories starting with `cookiecutter-template-` in current working directory. '''
'''Please clean your directory by removing all folders starting with `cookiecutter-template-` or '''
'''change your working directory.''' )
lowerCamelCase_ =(
Path(__file__ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
lowerCamelCase_ =path_to_transformer_root / '''templates''' / '''adding_a_new_model'''
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowerCAmelCase ) )
else:
with open(self._testing_file, '''r''' ) as configuration_file:
lowerCamelCase_ =json.load(lowerCAmelCase )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ), no_input=lowerCAmelCase, extra_context=lowerCAmelCase, )
lowerCamelCase_ =[directory for directory in os.listdir() if '''cookiecutter-template-''' in directory[:22]][0]
# Retrieve configuration
with open(directory + '''/configuration.json''', '''r''' ) as configuration_file:
lowerCamelCase_ =json.load(lowerCAmelCase )
lowerCamelCase_ =configuration['''lowercase_modelname''']
lowerCamelCase_ =configuration['''generate_tensorflow_pytorch_and_flax''']
os.remove(f'''{directory}/configuration.json''' )
lowerCamelCase_ ='''PyTorch''' in generate_tensorflow_pytorch_and_flax
lowerCamelCase_ ='''TensorFlow''' in generate_tensorflow_pytorch_and_flax
lowerCamelCase_ ='''Flax''' in generate_tensorflow_pytorch_and_flax
lowerCamelCase_ =f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(lowerCAmelCase, exist_ok=lowerCAmelCase )
os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''', exist_ok=lowerCAmelCase )
# Tests require submodules as they have parent imports
with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''', '''w''' ):
pass
shutil.move(
f'''{directory}/__init__.py''', f'''{model_dir}/__init__.py''', )
shutil.move(
f'''{directory}/configuration_{lowercase_model_name}.py''', f'''{model_dir}/configuration_{lowercase_model_name}.py''', )
def remove_copy_lines(lowerCAmelCase ):
with open(lowerCAmelCase, '''r''' ) as f:
lowerCamelCase_ =f.readlines()
with open(lowerCAmelCase, '''w''' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowerCAmelCase )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''', f'''{model_dir}/modeling_{lowercase_model_name}.py''', )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''', f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''', )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''', f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''', )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''', f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''', )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''', f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''', )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''', f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''', )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/{lowercase_model_name}.md''', f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''', )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''', f'''{model_dir}/tokenization_{lowercase_model_name}.py''', )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''', f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''', )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
# Create temp file
lowerCamelCase_, lowerCamelCase_ =mkstemp()
lowerCamelCase_ =False
with fdopen(lowerCAmelCase, '''w''' ) as new_file:
with open(lowerCAmelCase ) as old_file:
for line in old_file:
new_file.write(lowerCAmelCase )
if line_to_copy_below in line:
lowerCamelCase_ =True
for line_to_copy in lines_to_copy:
new_file.write(lowerCAmelCase )
if not line_found:
raise ValueError(f'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(lowerCAmelCase, lowerCAmelCase )
# Remove original file
remove(lowerCAmelCase )
# Move new file
move(lowerCAmelCase, lowerCAmelCase )
def skip_units(lowerCAmelCase ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(lowerCAmelCase ):
with open(lowerCAmelCase ) as datafile:
lowerCamelCase_ =[]
lowerCamelCase_ =False
lowerCamelCase_ =False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
lowerCamelCase_ =line.split('''\"''' )[1]
lowerCamelCase_ =skip_units(lowerCAmelCase )
elif "# Below: " in line and "##" not in line:
lowerCamelCase_ =line.split('''\"''' )[1]
lowerCamelCase_ =skip_units(lowerCAmelCase )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =[]
elif "# Replace with" in line and "##" not in line:
lowerCamelCase_ =[]
elif "##" not in line:
lines_to_copy.append(lowerCAmelCase )
remove(lowerCAmelCase )
replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(lowerCAmelCase )
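# Example session (sketch; the config path below is hypothetical, but the command name and
# flags come from the argparse subparser registered above):
# $ transformers-cli add-new-model --testing --testing_file ./tests/fixtures/add_new_model/config.json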
| 352 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def a_ ( ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ ={
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
lowerCamelCase_ =Dataset.from_dict(__snake_case )
return dataset
class __UpperCamelCase ( lowerCamelCase__ ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =get_dataset()
lowerCamelCase_ =make_duplicate_clusters(lowerCAmelCase, 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ), 2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =get_dataset()
lowerCamelCase_, lowerCamelCase_ =deduplicate_dataset(lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ), 2 )
print(lowerCAmelCase )
self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], lowerCAmelCase )
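# Standalone sketch (assumes the minhash_deduplication API exercised above): run the
# clustering directly on the toy dataset with the same 0.85 Jaccard threshold.
if __name__ == "__main__":
print(make_duplicate_clusters(get_dataset(), 0.8_5 ) )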
| 6 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def a_ ( __snake_case : Dict ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =384
lowerCamelCase_ =7
if "tiny" in model_name:
lowerCamelCase_ =96
lowerCamelCase_ =(2, 2, 6, 2)
lowerCamelCase_ =(3, 6, 12, 24)
elif "small" in model_name:
lowerCamelCase_ =96
lowerCamelCase_ =(2, 2, 18, 2)
lowerCamelCase_ =(3, 6, 12, 24)
elif "base" in model_name:
lowerCamelCase_ =128
lowerCamelCase_ =(2, 2, 18, 2)
lowerCamelCase_ =(4, 8, 16, 32)
lowerCamelCase_ =12
lowerCamelCase_ =512
elif "large" in model_name:
lowerCamelCase_ =192
lowerCamelCase_ =(2, 2, 18, 2)
lowerCamelCase_ =(6, 12, 24, 48)
lowerCamelCase_ =12
lowerCamelCase_ =768
# set label information
lowerCamelCase_ =150
lowerCamelCase_ ="huggingface/label-files"
lowerCamelCase_ ="ade20k-id2label.json"
lowerCamelCase_ =json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase_ ={int(k ): v for k, v in idalabel.items()}
lowerCamelCase_ ={v: k for k, v in idalabel.items()}
lowerCamelCase_ =SwinConfig(
embed_dim=_lowerCAmelCase , depths=_lowerCAmelCase , num_heads=_lowerCAmelCase , window_size=_lowerCAmelCase , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
lowerCamelCase_ =UperNetConfig(
backbone_config=_lowerCAmelCase , auxiliary_in_channels=_lowerCAmelCase , num_labels=_lowerCAmelCase , id2label=_lowerCAmelCase , label2id=_lowerCAmelCase , )
return config
def a_ ( __snake_case : int ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =[]
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.norm2.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', F'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.stages.{i}.downsample.reduction.weight''', F'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.weight''', F'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.downsample.norm.bias''', F'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def a_ ( __snake_case : Any , __snake_case : Dict , __snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =dct.pop(_lowerCAmelCase )
lowerCamelCase_ =val
def a_ ( __snake_case : Optional[int] , __snake_case : int ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =[int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowerCamelCase_ =num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowerCamelCase_ =state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
lowerCamelCase_ =state_dict.pop(F'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ =in_proj_weight[:dim, :]
lowerCamelCase_ =in_proj_bias[: dim]
lowerCamelCase_ =in_proj_weight[
dim : dim * 2, :
]
lowerCamelCase_ =in_proj_bias[
dim : dim * 2
]
lowerCamelCase_ =in_proj_weight[
-dim :, :
]
lowerCamelCase_ =in_proj_bias[-dim :]
# fmt: on
def a_ ( __snake_case : str ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =x.shape
lowerCamelCase_ =x.reshape(_lowerCAmelCase , 4 , in_channel // 4 )
lowerCamelCase_ =x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_lowerCAmelCase , _lowerCAmelCase )
return x
def a_ ( __snake_case : Optional[int] ) -> Any:
"""simple docstring"""
lowerCamelCase_ =x.shape
lowerCamelCase_ =x.reshape(_lowerCAmelCase , in_channel // 4 , 4 )
lowerCamelCase_ =x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(_lowerCAmelCase , _lowerCAmelCase )
return x
def a_ ( __snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =x.shape[0]
lowerCamelCase_ =x.reshape(4 , in_channel // 4 )
lowerCamelCase_ =x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_lowerCAmelCase )
return x
def a_ ( __snake_case : List[str] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =x.shape[0]
lowerCamelCase_ =x.reshape(in_channel // 4 , 4 )
lowerCamelCase_ =x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_lowerCAmelCase )
return x
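# Self-contained sanity sketch (assumption: the two 4-way unfold-order helpers above are
# mutual inverses): permuting rows [0, 2, 1, 3] after reshape(b, 4, c // 4) and then
# permuting columns [0, 2, 1, 3] after reshape(b, c // 4, 4) round-trips the tensor.
def _check_unfold_round_trip() -> None:
_x = torch.randn(8, 16 )
_b, _c = _x.shape
_folded = _x.reshape(_b, 4, _c // 4 )[:, [0, 2, 1, 3], :].transpose(1, 2 ).reshape(_b, _c )
_restored = _folded.reshape(_b, _c // 4, 4 )[:, :, [0, 2, 1, 3]].transpose(1, 2 ).reshape(_b, _c )
assert torch.equal(_restored, _x )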
def a_ ( __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ ={
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
lowerCamelCase_ =model_name_to_url[model_name]
lowerCamelCase_ =torch.hub.load_state_dict_from_url(_lowerCAmelCase , map_location='''cpu''' , file_name=_lowerCAmelCase )[
"state_dict"
]
for name, param in state_dict.items():
print(_lowerCAmelCase , param.shape )
lowerCamelCase_ =get_upernet_config(_lowerCAmelCase )
lowerCamelCase_ =UperNetForSemanticSegmentation(_lowerCAmelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
lowerCamelCase_ =state_dict.pop(_lowerCAmelCase )
if "bn" in key:
lowerCamelCase_ =key.replace('''bn''' , '''batch_norm''' )
lowerCamelCase_ =val
# rename keys
lowerCamelCase_ =create_rename_keys(_lowerCAmelCase )
for src, dest in rename_keys:
rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
read_in_q_k_v(_lowerCAmelCase , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
lowerCamelCase_ =reverse_correct_unfold_reduction_order(_lowerCAmelCase )
if "norm" in key:
lowerCamelCase_ =reverse_correct_unfold_norm_order(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
# verify on image
lowerCamelCase_ ="https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
lowerCamelCase_ =Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ).convert('''RGB''' )
lowerCamelCase_ =SegformerImageProcessor()
lowerCamelCase_ =processor(_lowerCAmelCase , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
lowerCamelCase_ =model(_lowerCAmelCase )
lowerCamelCase_ =outputs.logits
print(logits.shape )
print('''First values of logits:''' , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
lowerCamelCase_ =torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] )
elif model_name == "upernet-swin-small":
lowerCamelCase_ =torch.tensor(
[[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]] )
elif model_name == "upernet-swin-base":
lowerCamelCase_ =torch.tensor(
[[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]] )
elif model_name == "upernet-swin-large":
lowerCamelCase_ =torch.tensor(
[[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCAmelCase , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowerCAmelCase )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[F"""upernet-swin-{size}""" for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
a_ : Optional[Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a_ : Any = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 6 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Any =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Optional[Any] =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Dict =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Any =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Union[str, Any] =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Optional[Any] =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Tuple =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Optional[Any] =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Dict =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Union[str, Any] =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Optional[Any] =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Union[str, Any] =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Any =['flax']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
@classmethod
def lowercase__ ( cls, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls, ['''flax'''] )
| 354 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def a_ ( __snake_case : int = 150_0000 ) -> int:
"""simple docstring"""
lowerCamelCase_ =defaultdict(int )
lowerCamelCase_ =2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
if gcd(euclid_m , euclid_n ) > 1:
continue
lowerCamelCase_ =2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
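# Worked example of Euclid's formula used above: m=2, n=1 gives the primitive triple
# (m*m - n*n, 2*m*n, m*m + n*n) = (3, 4, 5), whose perimeter 3 + 4 + 5 = 12 equals
# 2*m*(m + n) = 2*2*3; the inner loop then counts 12, 24, 36, ... as its multiples.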
if __name__ == "__main__":
print(F"""{solution() = }""")
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : Optional[int] = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[Any] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
a_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 355 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ : Tuple = 16
a_ : Optional[int] = 32
def a_ ( __snake_case : Accelerator , __snake_case : int = 16 ) -> str:
"""simple docstring"""
lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowerCamelCase_ =load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__snake_case : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ =datasets.map(
tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ =tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__snake_case : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ =16
elif accelerator.mixed_precision != "no":
lowerCamelCase_ =8
else:
lowerCamelCase_ =None
return tokenizer.pad(
__snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowerCamelCase_ =DataLoader(
tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
lowerCamelCase_ =DataLoader(
tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ : Tuple = mocked_dataloaders # noqa: F811
def a_ ( __snake_case : List[str] , __snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , None ) == "1":
lowerCamelCase_ =2
# Initialize accelerator
lowerCamelCase_ =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ =config['''lr''']
lowerCamelCase_ =int(config['''num_epochs'''] )
lowerCamelCase_ =int(config['''seed'''] )
lowerCamelCase_ =int(config['''batch_size'''] )
lowerCamelCase_ =evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__snake_case )
def inner_training_loop(__snake_case : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ =model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ =AdamW(params=model.parameters() , lr=__snake_case )
lowerCamelCase_, lowerCamelCase_ =get_dataloaders(__snake_case , __snake_case )
# Instantiate scheduler
lowerCamelCase_ =get_linear_schedule_with_warmup(
optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.loss
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.logits.argmax(dim=-1 )
lowerCamelCase_, lowerCamelCase_ =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
lowerCamelCase_ =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __snake_case )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a_ ( ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowerCamelCase_ =parser.parse_args()
lowerCamelCase_ ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
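# Minimal sketch of the decorator in isolation (hedged: relies on accelerate's documented
# behavior of halving the batch size and retrying whenever the wrapped function raises an
# out-of-memory error):
#
# @find_executable_batch_size(starting_batch_size=128)
# def _probe(batch_size):
#     print(f"trying batch_size={batch_size}")
#     ...  # an OOM raised here re-enters _probe with batch_size 64, then 32, and so on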
| 6 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Optional[int] = logging.get_logger(__name__)
def a_ ( __snake_case : List[str] , __snake_case : Optional[int] ) -> int:
"""simple docstring"""
lowerCamelCase_ =[]
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def a_ ( __snake_case : List[Any] , __snake_case : List[str] ) -> List[str]:
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
lowerCamelCase_ =state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
lowerCamelCase_ =in_proj_weight[
: encoder_config.hidden_size, :
]
lowerCamelCase_ =in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowerCamelCase_ =in_proj_weight[
-encoder_config.hidden_size :, :
]
def a_ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ =dct.pop(lowercase_ )
lowerCamelCase_ =val
def a_ ( __snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
if "handwritten" in checkpoint_url:
lowerCamelCase_ ='''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowerCamelCase_ ='''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
lowerCamelCase_ =Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def a_ ( __snake_case : Any , __snake_case : Dict ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =ViTConfig(image_size=384 , qkv_bias=False )
lowerCamelCase_ =TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowerCamelCase_ =768
elif "large" in checkpoint_url:
# use ViT-large encoder
lowerCamelCase_ =1024
lowerCamelCase_ =4096
lowerCamelCase_ =24
lowerCamelCase_ =16
lowerCamelCase_ =1024
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowerCamelCase_ =False
lowerCamelCase_ ='''relu'''
lowerCamelCase_ =1024
lowerCamelCase_ =True
lowerCamelCase_ =False
lowerCamelCase_ =False
# load HuggingFace model
lowerCamelCase_ =ViTModel(lowercase_ , add_pooling_layer=False )
lowerCamelCase_ =TrOCRForCausalLM(lowercase_ )
lowerCamelCase_ =VisionEncoderDecoderModel(encoder=lowercase_ , decoder=lowercase_ )
model.eval()
# load state_dict of original model, rename some keys
lowerCamelCase_ =torch.hub.load_state_dict_from_url(lowercase_ , map_location='''cpu''' , check_hash=lowercase_ )['''model''']
lowerCamelCase_ =create_rename_keys(lowercase_ , lowercase_ )
for src, dest in rename_keys:
rename_key(lowercase_ , lowercase_ , lowercase_ )
read_in_q_k_v(lowercase_ , lowercase_ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowerCamelCase_ =state_dict.pop(lowercase_ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
lowerCamelCase_ =val
else:
lowerCamelCase_ =val
# load state dict
model.load_state_dict(lowercase_ )
# Check outputs on an image
lowerCamelCase_ =ViTImageProcessor(size=encoder_config.image_size )
lowerCamelCase_ =RobertaTokenizer.from_pretrained('''roberta-large''' )
lowerCamelCase_ =TrOCRProcessor(lowercase_ , lowercase_ )
lowerCamelCase_ =processor(images=prepare_img(lowercase_ ) , return_tensors='''pt''' ).pixel_values
# verify logits
lowerCamelCase_ =torch.tensor([[model.config.decoder.decoder_start_token_id]] )
lowerCamelCase_ =model(pixel_values=lowercase_ , decoder_input_ids=lowercase_ )
lowerCamelCase_ =outputs.logits
lowerCamelCase_ =torch.Size([1, 1, 5_0265] )
if "trocr-base-handwritten" in checkpoint_url:
lowerCamelCase_ =torch.tensor(
[-1.4_5_0_2, -4.6_6_8_3, -0.5_3_4_7, -2.9_2_9_1, 9.1_4_3_5, -3.0_5_7_1, 8.9_7_6_4, 1.7_5_6_0, 8.7_3_5_8, -1.5_3_1_1] )
elif "trocr-large-handwritten" in checkpoint_url:
lowerCamelCase_ =torch.tensor(
[-2.6_4_3_7, -1.3_1_2_9, -2.2_5_9_6, -5.3_4_5_5, 6.3_5_3_9, 1.7_6_0_4, 5.4_9_9_1, 1.4_7_0_2, 5.6_1_1_3, 2.0_1_7_0] )
elif "trocr-base-printed" in checkpoint_url:
lowerCamelCase_ =torch.tensor(
[-5.6_8_1_6, -5.8_3_8_8, 1.1_3_9_8, -6.9_0_3_4, 6.8_5_0_5, -2.4_3_9_3, 1.2_2_8_4, -1.0_2_3_2, -1.9_6_6_1, -3.9_2_1_0] )
elif "trocr-large-printed" in checkpoint_url:
lowerCamelCase_ =torch.tensor(
[-6.0_1_6_2, -7.0_9_5_9, 4.4_1_5_5, -5.1_0_6_3, 7.0_4_6_8, -3.1_6_3_1, 2.6_4_6_6, -0.3_0_8_1, -0.8_1_0_6, -1.7_5_3_5] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , lowercase_ , atol=1e-3 ), "First elements of logits not as expected"
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase_ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
a_ : Union[str, Any] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
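# Example invocation (sketch; the output directory is hypothetical, the script name is a
# placeholder, and the URL is the parser's default checkpoint):
# python convert_trocr_checkpoint.py \
#     --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#     --pytorch_dump_folder_path ./trocr-base-handwritten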
| 356 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
a_ : List[str] = """src/diffusers"""
# Matches is_xxx_available()
a_ : int = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
a_ : List[str] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
a_ : Optional[Any] = """
{0} = None
"""
a_ : List[Any] = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
a_ : Optional[Any] = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def a_ ( __snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =_re_backend.findall(__snake_case )
if len(__snake_case ) == 0:
return None
return "_and_".join(__snake_case )
def a_ ( ) -> Optional[int]:
"""simple docstring"""
with open(os.path.join(PATH_TO_DIFFUSERS , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.readlines()
# Get to the point we do the actual imports for type checking
lowerCamelCase_ =0
lowerCamelCase_ ={}
# Go through the end of the file
while line_index < len(__snake_case ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowerCamelCase_ =find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('''else:''' ):
line_index += 1
line_index += 1
lowerCamelCase_ =[]
# Until we unindent, add backend objects to the list
while line_index < len(__snake_case ) and len(lines[line_index] ) > 1:
lowerCamelCase_ =lines[line_index]
lowerCamelCase_ =_re_single_line_import.search(__snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__snake_case ) > 0:
lowerCamelCase_ =objects
else:
line_index += 1
return backend_specific_objects
def a_ ( __snake_case : Dict , __snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(__snake_case )
elif name.islower():
return DUMMY_FUNCTION.format(__snake_case , __snake_case )
else:
return DUMMY_CLASS.format(__snake_case , __snake_case )
def a_ ( __snake_case : Tuple=None ) -> List[str]:
"""simple docstring"""
if backend_specific_objects is None:
lowerCamelCase_ =read_init()
# For special correspondence backend to module name as used in the function requires_modulename
lowerCamelCase_ ={}
for backend, objects in backend_specific_objects.items():
lowerCamelCase_ ='''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']'''
lowerCamelCase_ ='''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__snake_case , __snake_case ) for o in objects] )
lowerCamelCase_ =dummy_file
return dummy_files
def a_ ( __snake_case : Dict=False ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
lowerCamelCase_ ={'''torch''': '''pt'''}
# Locate actual dummy modules and read their content.
lowerCamelCase_ =os.path.join(PATH_TO_DIFFUSERS , '''utils''' )
lowerCamelCase_ ={
backend: os.path.join(__snake_case , F'''dummy_{short_names.get(__snake_case , __snake_case )}_objects.py''' )
for backend in dummy_files.keys()
}
lowerCamelCase_ ={}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__snake_case ):
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.read()
else:
lowerCamelCase_ =''''''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py as the main '''
'''__init__ has new objects.''' )
with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'''The main __init__ has objects that are not present in '''
F'''diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py. Run `make fix-copies` '''
'''to fix this.''' )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
a_ : Tuple = parser.parse_args()
check_dummies(args.fix_and_overwrite)
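# Illustrative sketch (an addition, not part of the row above): the DUMMY_*
# templates this script fills in are not shown here; based on how the
# generated dummy files look elsewhere in this corpus, they plausibly are:
DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""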
| 6 | 0 |
'''simple docstring'''
from math import factorial
a_ : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def a_ ( __snake_case : List[Any] ) -> str:
"""simple docstring"""
    if not isinstance(number , int ):
        raise TypeError('''Parameter number must be int''' )
    if number < 0:
        raise ValueError('''Parameter number must be greater than or equal to 0''' )
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
def a_ ( __snake_case : List[Any] = 60 , __snake_case : int = 100_0000 ) -> Tuple:
"""simple docstring"""
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
raise TypeError('''Parameters chain_length and number_limit must be int''' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'''Parameters chain_length and number_limit must be greater than 0''' )
# the counter for the chains with the exact desired length
lowerCamelCase_ =0
# the cached sizes of the previous chains
lowerCamelCase_ ={}
    for start_chain_element in range(1 , number_limit ):
# The temporary set will contain the elements of the chain
lowerCamelCase_ =set()
lowerCamelCase_ =0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
lowerCamelCase_ =start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
            chain_set.add(chain_element )
            chain_set_length += 1
            lowerCamelCase_ =digit_factorial_sum(chain_element )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
lowerCamelCase_ =chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""")
| 357 |
'''simple docstring'''
a_ : List[Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def a_ ( __snake_case : int ) -> int:
"""simple docstring"""
lowerCamelCase_ =0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
a_ : list[bool | None] = [None] * 10_00_00_00
a_ : List[Any] = True
a_ : Optional[Any] = False
def a_ ( __snake_case : int ) -> bool:
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowerCamelCase_ =chain(next_number(__snake_case ) )
lowerCamelCase_ =number_chain
while number < 1000_0000:
lowerCamelCase_ =number_chain
number *= 10
return number_chain
def a_ ( __snake_case : int = 1000_0000 ) -> int:
"""simple docstring"""
for i in range(1 , __snake_case ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 6 | 0 |
'''simple docstring'''
from math import factorial
def a_ ( __snake_case : Union[str, Any] = 100 ) -> Tuple:
"""simple docstring"""
    return sum(int(x ) for x in str(factorial(__snake_case ) ) )
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
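# Worked example for the factorial digit sum above: 10! == 3628800, and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
from math import factorial as _factorial

assert sum(int(d) for d in str(_factorial(10))) == 27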
| 358 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a_ ( __snake_case : Tuple ) -> str:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __UpperCamelCase ( lowerCamelCase__ ):
@staticmethod
def lowercase__ ( lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''', type=lowerCAmelCase, default=lowerCAmelCase, help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''', action='''store_true''', help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''', action='''store_true''', help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''', )
download_parser.add_argument('''model''', type=lowerCAmelCase, help='''Name of the model to download''' )
download_parser.set_defaults(func=lowerCAmelCase )
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =model
lowerCamelCase_ =cache
lowerCamelCase_ =force
lowerCamelCase_ =trust_remote_code
def lowercase__ ( self ):
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
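# Hedged usage sketch: with the arguments registered above, this command is
# typically reached through the transformers CLI entry point, e.g.
#   transformers-cli download --cache-dir /tmp/models --force bert-base-uncased
# which boils down to the same two calls made above (note: they download
# model weights over the network):
from transformers import AutoModel, AutoTokenizer

AutoModel.from_pretrained("bert-base-uncased", cache_dir="/tmp/models", force_download=True)
AutoTokenizer.from_pretrained("bert-base-uncased", cache_dir="/tmp/models", force_download=True)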
| 6 | 0 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def a_ ( ) -> Optional[Any]:
"""simple docstring"""
    lowerCamelCase_, lowerCamelCase_ =9, 14 # noqa: F841
lowerCamelCase_ =[
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
lowerCamelCase_ =defaultdict(__snake_case )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
lowerCamelCase_ =mst(__snake_case )
lowerCamelCase_ =[
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
lowerCamelCase_ =tuple(answer[:2] )
lowerCamelCase_ =tuple(edge[::-1] )
assert edge in result or reverse in result
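# For reference, a minimal heap-based Prim's algorithm sketch over the same
# adjacency structure built in the test above; the imported ``mst`` may
# differ in details, so this is an illustration under that assumption only.
import heapq

def _prim(adjacency, start=0):
    visited = {start}
    heap = [(cost, start, to) for to, cost in adjacency[start]]
    heapq.heapify(heap)
    tree = []
    while heap:
        cost, frm, to = heapq.heappop(heap)
        if to in visited:
            continue
        visited.add(to)
        tree.append((frm, to, cost))
        for nxt, c in adjacency[to]:
            if nxt not in visited:
                heapq.heappush(heap, (c, to, nxt))
    return tree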
| 359 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
a_ : List[str] = logging.get_logger(__name__)
a_ : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
a_ : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowerCamelCase__ )} )
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowercase : int =field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase : int =field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowercase : int =field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowercase : int =field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowercase : float =field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowercase : int =field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowercase : int =field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowercase : int =field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[Any] ='train'
lowercase : Any ='dev'
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : SquadDataTrainingArguments
lowercase : List[SquadFeatures]
lowercase : Split
lowercase : bool
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = Split.train, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = "pt", ):
"""simple docstring"""
lowerCamelCase_ =args
lowerCamelCase_ =is_language_sensitive
lowerCamelCase_ =SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowerCAmelCase, lowerCAmelCase ):
try:
lowerCamelCase_ =Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
lowerCamelCase_ =mode
# Load data features from cache or dataset file
lowerCamelCase_ ='''v2''' if args.version_2_with_negative else '''v1'''
lowerCamelCase_ =os.path.join(
cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ =cached_features_file + '''.lock'''
with FileLock(lowerCAmelCase ):
if os.path.exists(lowerCAmelCase ) and not args.overwrite_cache:
lowerCamelCase_ =time.time()
lowerCamelCase_ =torch.load(lowerCAmelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ =self.old_features['''features''']
lowerCamelCase_ =self.old_features.get('''dataset''', lowerCAmelCase )
lowerCamelCase_ =self.old_features.get('''examples''', lowerCAmelCase )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
''' future run''' )
else:
if mode == Split.dev:
lowerCamelCase_ =self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ =self.processor.get_train_examples(args.data_dir )
lowerCamelCase_, lowerCamelCase_ =squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowerCAmelCase, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowerCAmelCase, )
lowerCamelCase_ =time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples}, lowerCAmelCase, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.features[i]
lowerCamelCase_ =torch.tensor(feature.input_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.attention_mask, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.token_type_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.cls_index, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.p_mask, dtype=torch.float )
lowerCamelCase_ =torch.tensor(feature.is_impossible, dtype=torch.float )
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ =torch.tensor(feature.start_position, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
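# A small, self-contained sketch of the per-item conversion performed in
# __getitem__ above: cached integer feature lists become fixed-dtype tensors.
import torch as _torch

_feature = {"input_ids": [101, 2054, 102], "attention_mask": [1, 1, 1]}
_inputs = {
    "input_ids": _torch.tensor(_feature["input_ids"], dtype=_torch.long),
    "attention_mask": _torch.tensor(_feature["attention_mask"], dtype=_torch.long),
}
assert _inputs["input_ids"].shape == (3,)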
| 6 | 0 |
'''simple docstring'''
from __future__ import annotations
def a_ ( __snake_case : float , __snake_case : float , __snake_case : float , ) -> List[Any]:
"""simple docstring"""
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''' )
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''' )
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
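# Worked example of the mass-action law n * p == n_i**2 encoded above: given
# hole and intrinsic concentrations, the electron concentration follows.
_n_i, _p = 4.0, 2.0
_n = _n_i**2 / _p
assert _n == 8.0
assert (_n * _p) ** 0.5 == _n_i  # and the geometric mean recovers n_i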
| 360 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Any = logging.get_logger(__name__)
a_ : Optional[int] = """https://openaipublic.azureedge.net/jukebox/models/"""
a_ : Any = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def a_ ( __snake_case : int ) -> Any:
"""simple docstring"""
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
lowerCamelCase_ =key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
lowerCamelCase_ =key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowerCamelCase_ =key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
lowerCamelCase_ =key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def a_ ( __snake_case : Dict , __snake_case : int , __snake_case : Dict , __snake_case : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ ={}
import re
lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_conv_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_encoder_block_conv_in.sub(__snake_case , __snake_case )
elif re_encoder_block_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_encoder_block_resnet.sub(__snake_case , __snake_case )
elif re_encoder_block_proj_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_proj_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
lowerCamelCase_ =re_encoder_block_proj_out.sub(__snake_case , __snake_case )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_conv_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_decoder_block_conv_out.sub(__snake_case , __snake_case )
elif re_decoder_block_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_decoder_block_resnet.sub(__snake_case , __snake_case )
elif re_decoder_block_proj_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_proj_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
lowerCamelCase_ =re_decoder_block_proj_in.sub(__snake_case , __snake_case )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_conv_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_prior_cond_conv_out.sub(__snake_case , __snake_case )
elif re_prior_cond_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_prior_cond_resnet.sub(__snake_case , __snake_case )
elif re_prior_cond_proj_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_proj_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
lowerCamelCase_ =re_prior_cond_proj_in.sub(__snake_case , __snake_case )
# keep original key
else:
lowerCamelCase_ =original_key
lowerCamelCase_ =replace_key(__snake_case )
if F'''{key_prefix}.{key}''' not in model_state_dict or key is None:
print(F'''failed converting {original_key} to {key}, does not match''' )
# handle missmatched shape
elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape:
lowerCamelCase_ =model_state_dict[F'''{key_prefix}.{key}''']
        print(F'''{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match''' )
lowerCamelCase_ =original_key
lowerCamelCase_ =original_key
lowerCamelCase_ =value
return new_dict
@torch.no_grad()
def a_ ( __snake_case : List[str]=None , __snake_case : Tuple=None ) -> Union[str, Any]:
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ):
lowerCamelCase_ =requests.get(F'''{PREFIX}{file}''' , allow_redirects=__snake_case )
os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=__snake_case )
open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , '''wb''' ).write(r.content )
lowerCamelCase_ =MODEL_MAPPING[model_name.split('''/''' )[-1]]
lowerCamelCase_ =JukeboxConfig.from_pretrained(__snake_case )
lowerCamelCase_ =JukeboxModel(__snake_case )
lowerCamelCase_ =[]
lowerCamelCase_ ={}
for i, dict_name in enumerate(__snake_case ):
lowerCamelCase_ =torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['''model''']
lowerCamelCase_ ={}
for k in old_dic.keys():
if k.endswith('''.b''' ):
lowerCamelCase_ =old_dic[k]
elif k.endswith('''.w''' ):
lowerCamelCase_ =old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowerCamelCase_ =old_dic[k]
else:
lowerCamelCase_ =old_dic[k]
lowerCamelCase_ ='''vqvae''' if i == 0 else F'''priors.{3 - i}'''
lowerCamelCase_ =fix_jukebox_keys(__snake_case , model.state_dict() , __snake_case , __snake_case )
weight_dict.append(__snake_case )
lowerCamelCase_ =weight_dict.pop(0 )
model.vqvae.load_state_dict(__snake_case )
for i in range(len(__snake_case ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile:
json.dump(__snake_case , __snake_case )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
return weight_dict
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
a_ : Optional[int] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
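# A standalone demo of the encoder-block rename performed above: the first
# regex maps an original checkpoint key onto the HF downsample-block layout.
import re as _re

_pat = _re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
_groups = _pat.match("encoders.0.level_blocks.1.model.2.3.weight").groups()
_block_index = int(_groups[2]) * 2 + int(_groups[3])
assert (
    f"encoders.{_groups[0]}.level_blocks.{_groups[1]}.downsample_block.{_block_index}.{_groups[-1]}"
    == "encoders.0.level_blocks.1.downsample_block.7.weight"
)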
| 6 | 0 |
'''simple docstring'''
def a_ ( __snake_case : Tuple ) -> list:
"""simple docstring"""
    lowerCamelCase_ =[0] * len(input_string )
    for i in range(1 , len(input_string ) ):
# use last results for better performance - dynamic programming
lowerCamelCase_ =prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowerCamelCase_ =prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowerCamelCase_ =j
return prefix_result
def a_ ( __snake_case : int ) -> int:
"""simple docstring"""
    return max(prefix_function(__snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
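# Usage sketch of the KMP prefix function computed above: the last entry is
# the length of the longest proper border of the whole string.
def _prefix_function(s: str) -> list:
    pi = [0] * len(s)
    for i in range(1, len(s)):
        j = pi[i - 1]
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi

assert _prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]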
| 361 |
'''simple docstring'''
def a_ ( __snake_case : int = 1000 ) -> int:
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =1, 1
lowerCamelCase_ =2
while True:
lowerCamelCase_ =0
lowerCamelCase_ =fa + fa
lowerCamelCase_, lowerCamelCase_ =fa, f
index += 1
        for _ in str(f ):
            i += 1
        if i == n:
            break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
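# A clean sketch of what the loop above is meant to compute: the index of
# the first Fibonacci number with n decimal digits (F(12) == 144 is the
# first with three digits).
def _first_fib_index_with_n_digits(n: int) -> int:
    f1, f2, index = 1, 1, 2
    while len(str(f2)) < n:
        f1, f2 = f2, f1 + f2
        index += 1
    return index

assert _first_fib_index_with_n_digits(3) == 12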
| 6 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def a_ ( __snake_case : str ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =384
if "tiny" in model_name:
lowerCamelCase_ =[3, 3, 9, 3]
lowerCamelCase_ =[96, 192, 384, 768]
if "small" in model_name:
lowerCamelCase_ =[3, 3, 27, 3]
lowerCamelCase_ =[96, 192, 384, 768]
if "base" in model_name:
lowerCamelCase_ =[3, 3, 27, 3]
lowerCamelCase_ =[128, 256, 512, 1024]
lowerCamelCase_ =512
if "large" in model_name:
lowerCamelCase_ =[3, 3, 27, 3]
lowerCamelCase_ =[192, 384, 768, 1536]
lowerCamelCase_ =768
if "xlarge" in model_name:
lowerCamelCase_ =[3, 3, 27, 3]
lowerCamelCase_ =[256, 512, 1024, 2048]
lowerCamelCase_ =1024
# set label information
lowerCamelCase_ =150
lowerCamelCase_ ="huggingface/label-files"
lowerCamelCase_ ="ade20k-id2label.json"
    lowerCamelCase_ =json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    lowerCamelCase_ ={int(k ): v for k, v in idalabel.items()}
    lowerCamelCase_ ={v: k for k, v in idalabel.items()}
    lowerCamelCase_ =ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
    lowerCamelCase_ =UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , idalabel=idalabel , labelaid=labelaid , )
return config
def a_ ( __snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =[]
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def a_ ( __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
    lowerCamelCase_ =dct.pop(old )
lowerCamelCase_ =val
def a_ ( __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ ={
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
lowerCamelCase_ =model_name_to_url[model_name]
    lowerCamelCase_ =torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )["state_dict"]
    lowerCamelCase_ =get_upernet_config(model_name )
    lowerCamelCase_ =UperNetForSemanticSegmentation(config )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
        lowerCamelCase_ =state_dict.pop(key )
if "bn" in key:
lowerCamelCase_ =key.replace('''bn''' , '''batch_norm''' )
lowerCamelCase_ =val
# rename keys
    lowerCamelCase_ =create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    model.load_state_dict(state_dict )
# verify on image
lowerCamelCase_ ="https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    lowerCamelCase_ =Image.open(requests.get(url , stream=True ).raw ).convert('''RGB''' )
    lowerCamelCase_ =SegformerImageProcessor()
    lowerCamelCase_ =processor(image , return_tensors='''pt''' ).pixel_values
    with torch.no_grad():
        lowerCamelCase_ =model(pixel_values )
if model_name == "upernet-convnext-tiny":
lowerCamelCase_ =torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
lowerCamelCase_ =torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
lowerCamelCase_ =torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
lowerCamelCase_ =torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
lowerCamelCase_ =torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
a_ : str = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
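# Mini-demo of the rename step driven by create_rename_keys above: each
# (src, dest) pair pops the old key and reinserts its tensor under the new one.
_state_dict = {"backbone.norm0.weight": [1.0, 2.0]}
for _src, _dest in [("backbone.norm0.weight", "backbone.hidden_states_norms.stage1.weight")]:
    _state_dict[_dest] = _state_dict.pop(_src)
assert "backbone.hidden_states_norms.stage1.weight" in _state_dict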
| 362 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def a_ ( __snake_case : Any ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
F'''{test_file} instead.''' )
lowerCamelCase_ =components[-1]
if not test_fn.endswith('''py''' ):
raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith('''test_modeling_''' ):
raise ValueError(
F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
lowerCamelCase_ =components[:-1] + [test_fn.replace('''.py''' , '''''' )]
lowerCamelCase_ ='''.'''.join(__snake_case )
return test_module_path
def a_ ( __snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =get_module_path(__snake_case )
lowerCamelCase_ =importlib.import_module(__snake_case )
return test_module
def a_ ( __snake_case : Dict ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =get_test_module(__snake_case )
for attr in dir(__snake_case ):
if attr.endswith('''ModelTester''' ):
tester_classes.append(getattr(__snake_case , __snake_case ) )
# sort with class names
return sorted(__snake_case , key=lambda __snake_case : x.__name__ )
def a_ ( __snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =get_test_module(__snake_case )
for attr in dir(__snake_case ):
lowerCamelCase_ =getattr(__snake_case , __snake_case )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
lowerCamelCase_ =getattr(__snake_case , '''all_model_classes''' , [] )
if len(__snake_case ) > 0:
test_classes.append(__snake_case )
# sort with class names
return sorted(__snake_case , key=lambda __snake_case : x.__name__ )
def a_ ( __snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =get_test_classes(__snake_case )
lowerCamelCase_ =set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(__snake_case , key=lambda __snake_case : x.__name__ )
def a_ ( __snake_case : str ) -> str:
"""simple docstring"""
lowerCamelCase_ =test_class()
if hasattr(__snake_case , '''setUp''' ):
test.setUp()
lowerCamelCase_ =None
if hasattr(__snake_case , '''model_tester''' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
lowerCamelCase_ =test.model_tester.__class__
return model_tester
def a_ ( __snake_case : Dict , __snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =get_test_classes(__snake_case )
lowerCamelCase_ =[]
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(__snake_case )
# sort with class names
return sorted(__snake_case , key=lambda __snake_case : x.__name__ )
def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ =get_test_classes_for_model(__snake_case , __snake_case )
lowerCamelCase_ =[]
for test_class in test_classes:
lowerCamelCase_ =get_model_tester_from_test_class(__snake_case )
if tester_class is not None:
tester_classes.append(__snake_case )
# sort with class names
return sorted(__snake_case , key=lambda __snake_case : x.__name__ )
def a_ ( __snake_case : Tuple ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =get_test_classes(__snake_case )
lowerCamelCase_ ={test_class: get_model_tester_from_test_class(__snake_case ) for test_class in test_classes}
return test_tester_mapping
def a_ ( __snake_case : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =get_model_classes(__snake_case )
lowerCamelCase_ ={
model_class: get_test_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes
}
return model_test_mapping
def a_ ( __snake_case : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =get_model_classes(__snake_case )
lowerCamelCase_ ={
model_class: get_tester_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes
}
return model_to_tester_mapping
def a_ ( __snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
if isinstance(__snake_case , __snake_case ):
return o
elif isinstance(__snake_case , __snake_case ):
return o.__name__
elif isinstance(__snake_case , (list, tuple) ):
return [to_json(__snake_case ) for x in o]
elif isinstance(__snake_case , __snake_case ):
return {to_json(__snake_case ): to_json(__snake_case ) for k, v in o.items()}
else:
return o
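# Runnable sketch of the path -> module-name conversion that the first helper
# above performs, on a hypothetical test file:
import os as _os

_components = _os.path.join("tests", "models", "bert", "test_modeling_bert.py").split(_os.path.sep)
_module_path = ".".join(_components[:-1] + [_components[-1].replace(".py", "")])
assert _module_path == "tests.models.bert.test_modeling_bert"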
| 6 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def a_ ( __snake_case : int , __snake_case : int , __snake_case : bool , __snake_case : list[int] , __snake_case : float ) -> int:
"""simple docstring"""
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if not scores:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
)
def a_ ( ) -> None:
"""simple docstring"""
lowerCamelCase_ =[90, 23, 6, 33, 21, 65, 123, 3_4423]
lowerCamelCase_ =math.log(len(__snake_case ) , 2 )
print(F'''Optimal value : {minimax(0 , 0 , __snake_case , __snake_case , __snake_case )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
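# Worked example of the alternation above: with scores [3, 5, 2, 9] on a
# height-2 tree, the maximizer picks max(min(3, 5), min(2, 9)) == 3.
def _minimax(depth, node_index, is_max, scores, height):
    if depth == height:
        return scores[node_index]
    left = _minimax(depth + 1, node_index * 2, not is_max, scores, height)
    right = _minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)

assert _minimax(0, 0, True, [3, 5, 2, 9], 2) == 3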
| 363 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : str =['speech']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Any =['speech']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
| 6 | 0 |
def a_ ( __snake_case : int , __snake_case : int ) -> int:
"""simple docstring"""
return number | (1 << position)
def a_ ( __snake_case : int , __snake_case : int ) -> int:
"""simple docstring"""
return number & ~(1 << position)
def a_ ( __snake_case : int , __snake_case : int ) -> int:
"""simple docstring"""
return number ^ (1 << position)
def a_ ( __snake_case : int , __snake_case : int ) -> bool:
"""simple docstring"""
return ((number >> position) & 1) == 1
def a_ ( __snake_case : int , __snake_case : int ) -> int:
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
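# Quick sanity checks for the bit helpers above (positions are 0-based):
assert (0b1101 | (1 << 1)) == 0b1111   # set bit 1
assert (0b1101 & ~(1 << 2)) == 0b1001  # clear bit 2
assert (0b1101 ^ (1 << 1)) == 0b1111   # flip bit 1
assert ((0b1101 >> 3) & 1) == 1        # bit 3 is set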
| 364 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[str] =['image_processor', 'tokenizer']
lowercase : Optional[int] ='AutoImageProcessor'
lowercase : List[str] ='AutoTokenizer'
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =self.image_processor
lowerCamelCase_ =False
def __call__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''images''', lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''text''', lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
lowerCamelCase_ =args[0]
lowerCamelCase_ =args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
lowerCamelCase_ =self.image_processor(lowerCAmelCase, *lowerCAmelCase, **lowerCAmelCase )
if text is not None:
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, **lowerCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCamelCase_ =encodings['''input_ids''']
return inputs
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@contextmanager
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
lowerCamelCase_ =True
lowerCamelCase_ =self.tokenizer
yield
lowerCamelCase_ =self.image_processor
lowerCamelCase_ =False
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False, lowerCAmelCase=None ):
"""simple docstring"""
if added_vocab is None:
lowerCamelCase_ =self.tokenizer.get_added_vocab()
lowerCamelCase_ ={}
while tokens:
lowerCamelCase_ =re.search(R'''<s_(.*?)>''', lowerCAmelCase, re.IGNORECASE )
if start_token is None:
break
lowerCamelCase_ =start_token.group(1 )
lowerCamelCase_ =re.search(Rf'''</s_{key}>''', lowerCAmelCase, re.IGNORECASE )
lowerCamelCase_ =start_token.group()
if end_token is None:
lowerCamelCase_ =tokens.replace(lowerCAmelCase, '''''' )
else:
lowerCamelCase_ =end_token.group()
lowerCamelCase_ =re.escape(lowerCAmelCase )
lowerCamelCase_ =re.escape(lowerCAmelCase )
lowerCamelCase_ =re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''', lowerCAmelCase, re.IGNORECASE )
if content is not None:
lowerCamelCase_ =content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowerCamelCase_ =self.tokenajson(lowerCAmelCase, is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase )
if value:
if len(lowerCAmelCase ) == 1:
lowerCamelCase_ =value[0]
lowerCamelCase_ =value
else: # leaf nodes
lowerCamelCase_ =[]
for leaf in content.split(R'''<sep/>''' ):
lowerCamelCase_ =leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowerCamelCase_ =leaf[1:-2] # for categorical special tokens
output[key].append(lowerCAmelCase )
if len(output[key] ) == 1:
lowerCamelCase_ =output[key][0]
lowerCamelCase_ =tokens[tokens.find(lowerCAmelCase ) + len(lowerCAmelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:], is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase )
if len(lowerCAmelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
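# Hedged sketch of the token format parsed by the token-to-JSON method above:
# <s_key>...</s_key> spans become dict entries. A single matching step:
import re as _re

_tokens = "<s_name>latte</s_name>"
_start = _re.search(r"<s_(.*?)>", _tokens, _re.IGNORECASE)
_key = _start.group(1)
_content = _re.search(
    _re.escape(_start.group()) + r"(.*?)" + _re.escape(f"</s_{_key}>"), _tokens, _re.IGNORECASE
)
assert _key == "name" and _content.group(1) == "latte"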
| 6 | 0 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def a_ ( __snake_case : int ) -> Any:
"""simple docstring"""
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 365 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =ShapEImgaImgPipeline
lowercase : Dict =['image']
lowercase : str =['image']
lowercase : int =[
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowercase : int =False
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self ):
"""simple docstring"""
return 8
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
lowerCamelCase_ =CLIPVisionModel(lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =CLIPImageProcessor(
crop_size=224, do_center_crop=lowerCAmelCase, do_normalize=lowerCAmelCase, do_resize=lowerCAmelCase, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=224, )
return image_processor
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowerCamelCase_ =PriorTransformer(**lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowerCamelCase_ =ShapERenderer(**lowerCAmelCase )
return model
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.dummy_prior
lowerCamelCase_ =self.dummy_image_encoder
lowerCamelCase_ =self.dummy_image_processor
lowerCamelCase_ =self.dummy_renderer
lowerCamelCase_ =HeunDiscreteScheduler(
beta_schedule='''exp''', num_train_timesteps=1_024, prediction_type='''sample''', use_karras_sigmas=lowerCAmelCase, clip_sample=lowerCAmelCase, clip_sample_range=1.0, )
lowerCamelCase_ ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
if str(lowerCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
else:
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCamelCase_ ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu'''
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =pipe(**self.get_dummy_inputs(lowerCAmelCase ) )
lowerCamelCase_ =output.images[0]
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowerCamelCase_ =np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =torch_device == '''cpu'''
lowerCamelCase_ =True
self._test_inference_batch_single_identical(
batch_size=2, test_max_difference=lowerCAmelCase, relax_max_difference=lowerCAmelCase, )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =1
lowerCamelCase_ =2
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
lowerCamelCase_ =batch_size * [inputs[key]]
lowerCamelCase_ =pipe(**lowerCAmelCase, num_images_per_prompt=lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowerCamelCase_ =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowerCamelCase_ =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
lowerCamelCase_ =pipe(
lowerCAmelCase, generator=lowerCAmelCase, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='''np''', ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )
| 6 | 0 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def a_ ( *__snake_case : List[Any] , __snake_case : int = None , __snake_case : Union[str, Any]=True , __snake_case : List[str]=2 ) -> List[Any]:
"""simple docstring"""
from .. import __version__
lowerCamelCase_ =take_from
lowerCamelCase_ =()
    if not isinstance(args[0] , tuple ):
lowerCamelCase_ =(args,)
for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
raise ValueError(
F'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
F''' version {__version__} is >= {version_name}''' )
lowerCamelCase_ =None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            lowerCamelCase_ =F'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
lowerCamelCase_ =F'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
lowerCamelCase_ =F'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
lowerCamelCase_ =warning + ''' ''' if standard_warn else ''''''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
lowerCamelCase_ =inspect.getouterframes(inspect.currentframe() )[1]
lowerCamelCase_ =call_frame.filename
lowerCamelCase_ =call_frame.lineno
lowerCamelCase_ =call_frame.function
lowerCamelCase_ =next(iter(deprecated_kwargs.items() ) )
raise TypeError(F'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
return values[0]
return values
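# Minimal sketch of the deprecation pattern implemented above (names here
# are illustrative, not the library's API): pop a deprecated kwarg, warn,
# and hand its value back to the caller.
import warnings as _warnings

def _pop_deprecated(kwargs: dict, name: str, version_name: str, message: str):
    if name in kwargs:
        _warnings.warn(
            f"The `{name}` argument is deprecated and will be removed in version {version_name}. " + message,
            FutureWarning,
        )
        return kwargs.pop(name)
    return None

_kwargs = {"old_kwarg": 42}
assert _pop_deprecated(_kwargs, "old_kwarg", "0.30.0", "Use `new_kwarg` instead.") == 42
assert "old_kwarg" not in _kwargs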
| 366 |
'''simple docstring'''
from itertools import product
def a_ ( __snake_case : int , __snake_case : int ) -> list[int]:
"""simple docstring"""
lowerCamelCase_ =sides_number
lowerCamelCase_ =max_face_number * dice_number
lowerCamelCase_ =[0] * (max_total + 1)
lowerCamelCase_ =1
lowerCamelCase_ =range(__snake_case , max_face_number + 1 )
for dice_numbers in product(__snake_case , repeat=__snake_case ):
lowerCamelCase_ =sum(__snake_case )
totals_frequencies[total] += 1
return totals_frequencies
def a_ ( ) -> float:
"""simple docstring"""
lowerCamelCase_ =total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowerCamelCase_ =total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowerCamelCase_ =0
lowerCamelCase_ =9
lowerCamelCase_ =4 * 9
lowerCamelCase_ =6
for peter_total in range(__snake_case , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowerCamelCase_ =(4**9) * (6**6)
lowerCamelCase_ =peter_wins_count / total_games_number
lowerCamelCase_ =round(__snake_case , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 6 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar) and the speed of light c,
# both in SI units, used by the Casimir-force function below
a_ : List[str] = 1.0_5457_1817e-34 # unit of ℏ : J * s
a_ : Union[str, Any] = 3e8 # unit of c : m * s^-1
def a_ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Tuple ) -> dict[str, float]:
"""simple docstring"""
if (force, area, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if force < 0:
raise ValueError('''Magnitude of force can not be negative''' )
if distance < 0:
raise ValueError('''Distance can not be negative''' )
if area < 0:
raise ValueError('''Area can not be negative''' )
if force == 0:
lowerCamelCase_ =(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
lowerCamelCase_ =(240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
lowerCamelCase_ =(
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError('''One and only one argument must be 0''' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
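    # Hedged sanity check: recompute the Casimir force for two 1 cm^2 plates
    # held 1 µm apart directly from the formula above (SI units; the inputs
    # and the ~0.13 µN magnitude are illustrative, not asserted by the module).
    _hbar, _c = 1.054571817e-34, 3e8
    _casimir_force = (_hbar * _c * pi**2 * 1e-4) / (240 * (1e-6) ** 4)
    assert 1e-7 < _casimir_force < 2e-7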
| 367 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
a_ : Tuple = TypeVar("""T""")
a_ : Dict = Union[List[T], Tuple[T, ...]]
a_ : int = Union[T, List[T], Dict[str, T]]
a_ : Optional[Any] = Union[str, bytes, os.PathLike]
| 6 | 0 |
'''simple docstring'''
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class __UpperCamelCase ( __a ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type, pa.intaa() )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaises(UpperCamelCase__ ):
lowerCamelCase_ =pa.array(TypedSequence([1, 2, 3] ), type=pa.intaa() )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaises(UpperCamelCase__ ):
lowerCamelCase_ =pa.array(TypedSequence([1, 2, 3], try_type=Value('''bool''' ), type=Value('''int64''' ) ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =pa.array(TypedSequence([1, 2, 3], type=Value('''int32''' ) ) )
self.assertEqual(arr.type, pa.intaa() )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowerCamelCase_ =pa.array(TypedSequence(['''foo''', '''bar'''], type=Value('''int64''' ) ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =pa.array(TypedSequence([1, 2, 3], try_type=Value('''int32''' ) ) )
self.assertEqual(arr.type, pa.intaa() )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =pa.array(TypedSequence(['''foo''', '''bar'''], try_type=Value('''int64''' ) ) )
self.assertEqual(arr.type, pa.string() )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =pa.array(TypedSequence([[[1, 2, 3]]], type=ArrayaD((1, 3), '''int64''' ) ) )
self.assertEqual(arr.type, ArrayaDExtensionType((1, 3), '''int64''' ) )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowerCamelCase_ =pa.array(TypedSequence(['''foo''', '''bar'''], type=ArrayaD((1, 3), '''int64''' ) ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =pa.array(TypedSequence([[[1, 2, 3]]], try_type=ArrayaD((1, 3), '''int64''' ) ) )
self.assertEqual(arr.type, ArrayaDExtensionType((1, 3), '''int64''' ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =pa.array(TypedSequence(['''foo''', '''bar'''], try_type=ArrayaD((1, 3), '''int64''' ) ) )
self.assertEqual(arr.type, pa.string() )
@require_pil
def lowercase__ ( self ):
"""simple docstring"""
import PIL.Image
lowerCamelCase_ =PIL.Image.fromarray(np.arange(10, dtype=np.uinta ).reshape(2, 5 ) )
with patch(
'''datasets.arrow_writer.cast_to_python_objects''', side_effect=UpperCamelCase__ ) as mock_cast_to_python_objects:
lowerCamelCase_ =pa.array(TypedSequence([{'''path''': None, '''bytes''': B'''image_bytes'''}, pil_image], type=Image() ) )
lowerCamelCase_ , lowerCamelCase_ =mock_cast_to_python_objects.call_args_list[-1]
self.assertIn('''optimize_list_casting''', UpperCamelCase__ )
self.assertFalse(kwargs['''optimize_list_casting'''] )
def a_ ( __snake_case : List[str] , __snake_case : int ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =pa.BufferReader(__UpperCamelCase ) if isinstance(__UpperCamelCase , pa.Buffer ) else pa.memory_map(__UpperCamelCase )
lowerCamelCase_ =pa.ipc.open_stream(__UpperCamelCase )
lowerCamelCase_ =f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def a_ ( __snake_case : List[str] , __snake_case : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =pa.BufferOutputStream()
lowerCamelCase_ =pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
lowerCamelCase_ , lowerCamelCase_ =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCamelCase_ ={'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def a_ ( ) -> int:
"""simple docstring"""
lowerCamelCase_ =pa.BufferOutputStream()
lowerCamelCase_ =Features({'''labels''': ClassLabel(names=['''neg''', '''pos'''] )} )
with ArrowWriter(stream=__UpperCamelCase , features=__UpperCamelCase ) as writer:
writer.write({'''labels''': 0} )
writer.write({'''labels''': 1} )
lowerCamelCase_ , lowerCamelCase_ =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
lowerCamelCase_ =pa.BufferReader(output.getvalue() )
lowerCamelCase_ =pa.ipc.open_stream(__UpperCamelCase )
lowerCamelCase_ =f.read_all()
lowerCamelCase_ =pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__UpperCamelCase )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
def a_ ( __snake_case : Dict ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =pa.BufferOutputStream()
with ArrowWriter(
stream=__UpperCamelCase , writer_batch_size=__UpperCamelCase , hash_salt='''split_name''' , check_duplicates=__UpperCamelCase , ) as writer:
with pytest.raises(__UpperCamelCase ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=[1, 2] )
lowerCamelCase_ , lowerCamelCase_ =writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10] )
def a_ ( __snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =pa.BufferOutputStream()
with ArrowWriter(
stream=__UpperCamelCase , writer_batch_size=__UpperCamelCase , hash_salt='''split_name''' , check_duplicates=__UpperCamelCase , ) as writer:
with pytest.raises(__UpperCamelCase ):
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=10 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=10 )
lowerCamelCase_ , lowerCamelCase_ =writer.finalize()
@pytest.mark.parametrize('''writer_batch_size''' , [None, 2, 10] )
def a_ ( __snake_case : str ) -> int:
"""simple docstring"""
lowerCamelCase_ =pa.BufferOutputStream()
with ArrowWriter(
stream=__UpperCamelCase , writer_batch_size=__UpperCamelCase , hash_salt='''split_name''' , check_duplicates=__UpperCamelCase , ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} , key=1 )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} , key=2 )
lowerCamelCase_ , lowerCamelCase_ =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def a_ ( __snake_case : Dict , __snake_case : Any ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =pa.BufferOutputStream()
lowerCamelCase_ =pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
writer.write_batch({'''col_1''': [], '''col_2''': []} )
lowerCamelCase_ , lowerCamelCase_ =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCamelCase_ ={'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def a_ ( __snake_case : List[Any] , __snake_case : str ) -> str:
"""simple docstring"""
lowerCamelCase_ =pa.BufferOutputStream()
lowerCamelCase_ =pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write_table(pa.Table.from_pydict({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} ) )
lowerCamelCase_ , lowerCamelCase_ =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCamelCase_ ={'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize('''writer_batch_size''' , [None, 1, 10] )
@pytest.mark.parametrize(
'''fields''' , [None, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}, {'''col_1''': pa.string(), '''col_2''': pa.intaa()}] )
def a_ ( __snake_case : str , __snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =pa.BufferOutputStream()
lowerCamelCase_ =pa.schema(__UpperCamelCase ) if fields else None
with ArrowWriter(stream=__UpperCamelCase , schema=__UpperCamelCase , writer_batch_size=__UpperCamelCase ) as writer:
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''foo'''], '''col_2''': [1]} ) )
writer.write_row(pa.Table.from_pydict({'''col_1''': ['''bar'''], '''col_2''': [2]} ) )
lowerCamelCase_ , lowerCamelCase_ =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCamelCase_ ={'''col_1''': pa.string(), '''col_2''': pa.intaa()}
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def a_ ( ) -> List[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase_ ={'''col_1''': pa.string(), '''col_2''': pa.intaa()}
lowerCamelCase_ =os.path.join(__UpperCamelCase , '''test.arrow''' )
with ArrowWriter(path=__UpperCamelCase , schema=pa.schema(__UpperCamelCase ) ) as writer:
writer.write_batch({'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]} )
lowerCamelCase_ , lowerCamelCase_ =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__UpperCamelCase , metadata=writer._schema.metadata )
_check_output(__UpperCamelCase , 1 )
def a_ ( __snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
if pa.types.is_list(__UpperCamelCase ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def a_ ( __snake_case : Union[str, Any] , __snake_case : Dict ) -> List[Any]:
"""simple docstring"""
if isinstance(lst[0] , __UpperCamelCase ):
change_first_primitive_element_in_list(lst[0] , __UpperCamelCase )
else:
lowerCamelCase_ =value
@pytest.mark.parametrize('''optimized_int_type, expected_dtype''' , [(None, pa.intaa()), (Value('''int32''' ), pa.intaa())] )
@pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def a_ ( __snake_case : int , __snake_case : Union[str, Any] , __snake_case : int ) -> int:
"""simple docstring"""
lowerCamelCase_ =pa.array(TypedSequence(__UpperCamelCase , optimized_int_type=__UpperCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
'''col, expected_dtype''' , [
('''attention_mask''', pa.inta()),
('''special_tokens_mask''', pa.inta()),
('''token_type_ids''', pa.inta()),
('''input_ids''', pa.intaa()),
('''other''', pa.intaa()),
] , )
@pytest.mark.parametrize('''sequence''' , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ =pa.array(OptimizedTypedSequence(__UpperCamelCase , col=__UpperCamelCase ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
lowerCamelCase_ =copy.deepcopy(__UpperCamelCase )
lowerCamelCase_ =np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase_ =pa.array(OptimizedTypedSequence(__UpperCamelCase , col=__UpperCamelCase ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize('''raise_exception''' , [False, True] )
def a_ ( __snake_case : str , __snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =str(tmp_path / '''dataset-train.arrow''' )
try:
with ArrowWriter(path=__UpperCamelCase ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def a_ ( __snake_case : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ ='''mock://dataset-train.arrow'''
with ArrowWriter(path=__UpperCamelCase , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__UpperCamelCase ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
lowerCamelCase_ , lowerCamelCase_ =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__UpperCamelCase )
def a_ ( ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =pa.BufferOutputStream()
with ParquetWriter(stream=__UpperCamelCase ) as writer:
writer.write({'''col_1''': '''foo''', '''col_2''': 1} )
writer.write({'''col_1''': '''bar''', '''col_2''': 2} )
lowerCamelCase_ , lowerCamelCase_ =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
lowerCamelCase_ =pa.BufferReader(output.getvalue() )
lowerCamelCase_ =pq.read_table(__UpperCamelCase )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize('''embed_local_files''' , [False, True] )
def a_ ( __snake_case : Optional[int] , __snake_case : int ) -> Dict:
"""simple docstring"""
import PIL.Image
lowerCamelCase_ =str(tmp_path / '''test_image_rgb.jpg''' )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__UpperCamelCase , format='''png''' )
lowerCamelCase_ =pa.BufferOutputStream()
with ParquetWriter(
stream=__UpperCamelCase , features=Features({'''image''': Image()} ) , embed_local_files=__UpperCamelCase ) as writer:
writer.write({'''image''': image_path} )
writer.finalize()
lowerCamelCase_ =pa.BufferReader(output.getvalue() )
lowerCamelCase_ =pq.read_table(__UpperCamelCase )
lowerCamelCase_ =pa_table.to_pydict()
if embed_local_files:
assert isinstance(out['''image'''][0]['''path'''] , __UpperCamelCase )
with open(__UpperCamelCase , '''rb''' ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def a_ ( ) -> Any:
"""simple docstring"""
lowerCamelCase_ =pa.schema([pa.field('''col_1''' , pa.string() , nullable=__UpperCamelCase )] )
lowerCamelCase_ =pa.BufferOutputStream()
with ArrowWriter(stream=__UpperCamelCase ) as writer:
writer._build_writer(inferred_schema=__UpperCamelCase )
assert writer._schema == pa.schema([pa.field('''col_1''' , pa.string() )] )
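# A hedged end-to-end sketch distilled from the tests above: write two rows to
# an in-memory Arrow stream and read them back, using only calls exercised in
# this file.
def _arrow_writer_roundtrip_sketch():
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer.write({'''col_1''': '''foo''', '''col_2''': 1})
        writer.write({'''col_1''': '''bar''', '''col_2''': 2})
        num_examples, num_bytes = writer.finalize()
    table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
    assert num_examples == 2
    assert table.to_pydict() == {'''col_1''': ['''foo''', '''bar'''], '''col_2''': [1, 2]}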
| 368 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def a_ ( __snake_case : str , __snake_case : bool = True , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : bool = False , __snake_case : float = 100 , __snake_case : float = 0.0_1 , __snake_case : float = 1 , ) -> Any:
"""simple docstring"""
lowerCamelCase_ =False
lowerCamelCase_ =search_prob
lowerCamelCase_ =start_temperate
lowerCamelCase_ =[]
lowerCamelCase_ =0
lowerCamelCase_ =None
while not search_end:
lowerCamelCase_ =current_state.score()
if best_state is None or current_score > best_state.score():
lowerCamelCase_ =current_state
scores.append(__snake_case )
iterations += 1
lowerCamelCase_ =None
lowerCamelCase_ =current_state.get_neighbors()
while (
next_state is None and neighbors
        ):  # until we find a neighbor we can move to
lowerCamelCase_ =random.randint(0 , len(__snake_case ) - 1 ) # picking a random neighbor
lowerCamelCase_ =neighbors.pop(__snake_case )
lowerCamelCase_ =picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCamelCase_ =change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCamelCase_ =picked_neighbor
else:
lowerCamelCase_ =(math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowerCamelCase_ =picked_neighbor
lowerCamelCase_ =current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCamelCase_ =True
else:
lowerCamelCase_ =next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__snake_case ) , __snake_case )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def a_ ( __snake_case : List[str] , __snake_case : Optional[int] ) -> str:
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : List[str] = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def a_ ( __snake_case : Dict , __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return (3 * x**2) - (6 * y)
a_ : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
a_ : Dict = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 6 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Tuple = logging.get_logger(__name__)
a_ : Optional[Any] = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __UpperCamelCase ( _lowerCAmelCase ):
lowercase : Union[str, Any] ="mobilenet_v2"
def __init__( self, lowerCAmelCase=3, lowerCAmelCase=224, lowerCAmelCase=1.0, lowerCAmelCase=8, lowerCAmelCase=8, lowerCAmelCase=6, lowerCAmelCase=32, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase="relu6", lowerCAmelCase=True, lowerCAmelCase=0.8, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0_0_1, lowerCAmelCase=255, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(**_lowercase )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
lowerCamelCase_ =num_channels
lowerCamelCase_ =image_size
lowerCamelCase_ =depth_multiplier
lowerCamelCase_ =depth_divisible_by
lowerCamelCase_ =min_depth
lowerCamelCase_ =expand_ratio
lowerCamelCase_ =output_stride
lowerCamelCase_ =first_layer_is_expansion
lowerCamelCase_ =finegrained_output
lowerCamelCase_ =hidden_act
lowerCamelCase_ =tf_padding
lowerCamelCase_ =classifier_dropout_prob
lowerCamelCase_ =initializer_range
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =semantic_loss_ignore_index
class __UpperCamelCase ( _lowerCAmelCase ):
lowercase : Any =version.parse('1.11' )
@property
def lowercase__ ( self ):
"""simple docstring"""
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def lowercase__ ( self ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def lowercase__ ( self ):
"""simple docstring"""
return 1e-4
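# Hedged usage sketch, assuming the two classes above correspond to the public
# `transformers` MobileNetV2 API (local names are obfuscated):
#
#     from transformers import MobileNetV2Config, MobileNetV2Model
#     config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
#     model = MobileNetV2Model(config)  # randomly initialized weights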
| 369 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a_ ( __snake_case : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =[
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__snake_case , __snake_case )
def a_ ( __snake_case : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =emb.weight.shape
lowerCamelCase_ =nn.Linear(__snake_case , __snake_case , bias=__snake_case )
lowerCamelCase_ =emb.weight.data
return lin_layer
def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple=None ) -> Dict:
"""simple docstring"""
lowerCamelCase_ ={}
for old_key in state_dict.keys():
lowerCamelCase_ =old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowerCamelCase_ =key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' )
else:
lowerCamelCase_ =key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
if "gate" in key:
lowerCamelCase_ =key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
if "fc2" and "experts" not in key:
lowerCamelCase_ =key.replace('''.fc2.''' , '''.ffn.fc2.''' )
if "fc1" and "experts" not in key:
lowerCamelCase_ =key.replace('''.fc1.''' , '''.ffn.fc1.''' )
if ".encoder_attn." in key:
lowerCamelCase_ =key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
lowerCamelCase_ =key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
lowerCamelCase_ =key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
lowerCamelCase_ =state_dict[old_key]
return new_dict
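# Illustration of the renaming above on hypothetical fairseq keys:
#   'decoder.layers.3.moe_layer.experts.0.fc1.weight' (expert_idx=3)
#       -> 'decoder.layers.3.ffn.experts.expert_3.fc1.weight'
#   'encoder.layers.0.fc1.weight' (dense layer, no experts)
#       -> 'encoder.layers.0.ffn.fc1.weight'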
def a_ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : str = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =0
os.makedirs(__snake_case , exist_ok=__snake_case )
for expert in range(__snake_case ):
lowerCamelCase_ =switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(__snake_case ):
lowerCamelCase_ =torch.load(__snake_case )['''model''']
remove_ignore_keys_(__snake_case )
lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
lowerCamelCase_ =os.path.join(
__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
torch.save(__snake_case , __snake_case )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__snake_case )[0]].dtype )
# Add the last block
lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
lowerCamelCase_ =torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(__snake_case )
lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
lowerCamelCase_ =shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved in the same file)
if len(__snake_case ) == 1:
lowerCamelCase_ =os.path.join(__snake_case , __snake_case )
torch.save(__snake_case , __snake_case )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__snake_case , __snake_case )
# Otherwise, let's build the index
lowerCamelCase_ ={}
for idx, shard in enumerate(__snake_case ):
lowerCamelCase_ =weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(__snake_case ):05d}.bin''' )
lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) )
for key in shard:
lowerCamelCase_ =shard_file
# Add the metadata
lowerCamelCase_ ={'''total_size''': total_size}
lowerCamelCase_ ={'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(__snake_case , __snake_case ) , '''w''' , encoding='''utf-8''' ) as f:
lowerCamelCase_ =json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n'''
f.write(__snake_case )
return metadata, index
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
a_ : Tuple = parser.parse_args()
a_ , a_ : int = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
a_ : Tuple = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
a_ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
| 6 | 0 |
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
a_ : int = """scheduler_config.json"""
class __UpperCamelCase ( _a ):
lowercase : List[Any] =1
lowercase : str =2
lowercase : Dict =3
lowercase : Any =4
lowercase : str =5
@dataclass
class __UpperCamelCase ( _a ):
lowercase : jnp.ndarray
class __UpperCamelCase :
lowercase : Optional[Any] =SCHEDULER_CONFIG_NAME
lowercase : Tuple =["""dtype"""]
lowercase : List[str] =[]
lowercase : str =True
@classmethod
def lowercase__ ( cls, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase=False, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =cls.load_config(
pretrained_model_name_or_path=__lowerCAmelCase, subfolder=__lowerCAmelCase, return_unused_kwargs=__lowerCAmelCase, **__lowerCAmelCase, )
lowerCamelCase_, lowerCamelCase_ =cls.from_config(__lowerCAmelCase, return_unused_kwargs=__lowerCAmelCase, **__lowerCAmelCase )
if hasattr(__lowerCAmelCase, '''create_state''' ) and getattr(__lowerCAmelCase, '''has_state''', __lowerCAmelCase ):
lowerCamelCase_ =scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = False, **lowerCAmelCase ):
"""simple docstring"""
self.save_config(save_directory=__lowerCAmelCase, push_to_hub=__lowerCAmelCase, **__lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self._get_compatibles()
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
lowerCamelCase_ =list(set([cls.__name__] + cls._compatibles ) )
lowerCamelCase_ =importlib.import_module(__name__.split('''.''' )[0] )
lowerCamelCase_ =[
getattr(__lowerCAmelCase, __lowerCAmelCase ) for c in compatible_classes_str if hasattr(__lowerCAmelCase, __lowerCAmelCase )
]
return compatible_classes
def a_ ( __snake_case : jnp.ndarray , __snake_case : Tuple[int] ) -> str:
"""simple docstring"""
assert len(a__ ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(a__ ) - x.ndim) ) , a__ )
def a_ ( __snake_case : int , __snake_case : str=0.9_9_9 , __snake_case : List[Any]=jnp.floataa ) -> List[str]:
"""simple docstring"""
def alpha_bar(__snake_case : List[str] ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
lowerCamelCase_ =[]
for i in range(a__ ):
lowerCamelCase_ =i / num_diffusion_timesteps
lowerCamelCase_ =(i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(a__ ) / alpha_bar(a__ ) , a__ ) )
return jnp.array(a__ , dtype=a__ )
@flax.struct.dataclass
class __UpperCamelCase :
lowercase : jnp.ndarray
lowercase : jnp.ndarray
lowercase : jnp.ndarray
@classmethod
def lowercase__ ( cls, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =scheduler.config
if config.trained_betas is not None:
lowerCamelCase_ =jnp.asarray(config.trained_betas, dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowerCamelCase_ =jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCamelCase_ =(
jnp.linspace(
config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCamelCase_ =betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype )
else:
raise NotImplementedError(
f'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
lowerCamelCase_ =1.0 - betas
lowerCamelCase_ =jnp.cumprod(__lowerCAmelCase, axis=0 )
return cls(
alphas=__lowerCAmelCase, betas=__lowerCAmelCase, alphas_cumprod=__lowerCAmelCase, )
def a_ ( __snake_case : CommonSchedulerState , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =state.alphas_cumprod
lowerCamelCase_ =alphas_cumprod[timesteps] ** 0.5
lowerCamelCase_ =sqrt_alpha_prod.flatten()
lowerCamelCase_ =broadcast_to_shape_from_left(a__ , original_samples.shape )
lowerCamelCase_ =(1 - alphas_cumprod[timesteps]) ** 0.5
lowerCamelCase_ =sqrt_one_minus_alpha_prod.flatten()
lowerCamelCase_ =broadcast_to_shape_from_left(a__ , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def a_ ( __snake_case : CommonSchedulerState , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray ) -> str:
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =get_sqrt_alpha_prod(a__ , a__ , a__ , a__ )
lowerCamelCase_ =sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def a_ ( __snake_case : CommonSchedulerState , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray , __snake_case : jnp.ndarray ) -> Tuple:
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =get_sqrt_alpha_prod(a__ , a__ , a__ , a__ )
lowerCamelCase_ =sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
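# Hedged numeric sketch of the add-noise path above, wrapped in a helper so it
# has no import-time side effects: a hand-built linear beta schedule and the
# two per-timestep coefficients of
# q(x_t | x_0) = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise.
# The schedule endpoints (0.0001, 0.02) are illustrative.
def _q_sample_coefficients_sketch(t=10):
    betas = jnp.linspace(0.0001, 0.02, 1000, dtype=jnp.float32)
    alphas_cumprod = jnp.cumprod(1.0 - betas, axis=0)
    signal_coeff = alphas_cumprod[t] ** 0.5
    noise_coeff = (1.0 - alphas_cumprod[t]) ** 0.5
    # the two coefficients are complementary: alpha_bar + (1 - alpha_bar) = 1
    assert bool(jnp.allclose(signal_coeff**2 + noise_coeff**2, 1.0))
    return signal_coeff, noise_coeff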
| 370 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : int =['image_processor', 'tokenizer']
lowercase : int ='LayoutLMv2ImageProcessor'
lowercase : Any =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = True, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=lowerCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =[text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase_ =features['''words''']
lowerCamelCase_ =self.tokenizer(
text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, )
# add pixel values
lowerCamelCase_ =features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowerCamelCase_ =self.get_overflowing_images(lowerCAmelCase, encoded_inputs['''overflow_to_sample_mapping'''] )
lowerCamelCase_ =images
return encoded_inputs
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(lowerCAmelCase )} and {len(lowerCAmelCase )}''' )
return images_with_overflow
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
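# Hedged usage sketch, assuming the standard checkpoint name and that
# pytesseract is installed (apply_ocr defaults to True in the image processor):
#
#     from PIL import Image
#     from transformers import LayoutXLMProcessor
#     processor = LayoutXLMProcessor.from_pretrained('''microsoft/layoutxlm-base''')
#     page = Image.open('''document_page.png''').convert('''RGB''')
#     encoding = processor(page, return_tensors='''pt''')  # input_ids, bbox, attention_mask, image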
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ : Any = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
a_ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
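# With the lazy structure above, importing this subpackage is cheap: the heavy
# torch/TF modules are only loaded on first attribute access. A hedged sketch,
# assuming this file is the XLM subpackage __init__:
#
#     import transformers.models.xlm as xlm   # fast, no torch import yet
#     model_cls = xlm.XLMModel                # torch is imported here, on demand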
| 371 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =VQModel
lowercase : Union[str, Any] ='sample'
@property
def lowercase__ ( self, lowerCAmelCase=(32, 32) ):
"""simple docstring"""
lowerCamelCase_ =4
lowerCamelCase_ =3
lowerCamelCase_ =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
return {"sample": image}
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
lowerCamelCase_ =self.dummy_input
return init_dict, inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''', output_loading_info=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ), 0 )
model.to(lowerCAmelCase )
lowerCamelCase_ =model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(lowerCAmelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
lowerCamelCase_ =torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size )
lowerCamelCase_ =image.to(lowerCAmelCase )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase ).sample
lowerCamelCase_ =output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCamelCase_ =torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) )
| 6 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Optional[int] = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class __UpperCamelCase ( _SCREAMING_SNAKE_CASE ):
lowercase : Union[str, Any] ='codegen'
lowercase : Union[str, Any] ={
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self, lowerCAmelCase=50_400, lowerCAmelCase=2_048, lowerCAmelCase=2_048, lowerCAmelCase=4_096, lowerCAmelCase=28, lowerCAmelCase=16, lowerCAmelCase=64, lowerCAmelCase=None, lowerCAmelCase="gelu_new", lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=1e-5, lowerCAmelCase=0.0_2, lowerCAmelCase=True, lowerCAmelCase=50_256, lowerCAmelCase=50_256, lowerCAmelCase=False, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =vocab_size
lowerCamelCase_ =n_ctx
lowerCamelCase_ =n_positions
lowerCamelCase_ =n_embd
lowerCamelCase_ =n_layer
lowerCamelCase_ =n_head
lowerCamelCase_ =n_inner
lowerCamelCase_ =rotary_dim
lowerCamelCase_ =activation_function
lowerCamelCase_ =resid_pdrop
lowerCamelCase_ =embd_pdrop
lowerCamelCase_ =attn_pdrop
lowerCamelCase_ =layer_norm_epsilon
lowerCamelCase_ =initializer_range
lowerCamelCase_ =use_cache
lowerCamelCase_ =bos_token_id
lowerCamelCase_ =eos_token_id
super().__init__(
bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, tie_word_embeddings=lowerCAmelCase, **lowerCAmelCase )
class __UpperCamelCase ( _SCREAMING_SNAKE_CASE ):
def __init__( self, lowerCAmelCase, lowerCAmelCase = "default", lowerCAmelCase = None, lowerCAmelCase = False, ):
"""simple docstring"""
super().__init__(lowerCAmelCase, task=lowerCAmelCase, patching_specs=lowerCAmelCase, use_past=lowerCAmelCase )
if not getattr(self._config, '''pad_token_id''', lowerCAmelCase ):
# TODO: how to do that better?
lowerCamelCase_ =0
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase, direction='''inputs''' )
lowerCamelCase_ ={0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCamelCase_ ={0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowercase__ ( self ):
"""simple docstring"""
return self._config.n_layer
@property
def lowercase__ ( self ):
"""simple docstring"""
return self._config.n_head
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = -1, lowerCAmelCase = -1, lowerCAmelCase = False, lowerCAmelCase = None, ):
"""simple docstring"""
lowerCamelCase_ =super(lowerCAmelCase, self ).generate_dummy_inputs(
lowerCAmelCase, batch_size=lowerCAmelCase, seq_length=lowerCAmelCase, is_pair=lowerCAmelCase, framework=lowerCAmelCase )
        # We need to order the inputs in the way they appear in forward()
lowerCamelCase_ =OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
                lowerCamelCase_, lowerCamelCase_ =common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase_ =seqlen + 2
lowerCamelCase_ =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase_ =[
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(self.num_layers )
]
lowerCamelCase_ =common_inputs["""attention_mask"""]
if self.use_past:
lowerCamelCase_ =ordered_inputs["""attention_mask"""].dtype
lowerCamelCase_ =torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(lowerCAmelCase, lowerCAmelCase, dtype=lowerCAmelCase )], dim=1 )
return ordered_inputs
@property
def lowercase__ ( self ):
"""simple docstring"""
return 13
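# Hedged usage sketch, assuming the classes above mirror the public CodeGen
# config and ONNX config from `transformers` (local names are obfuscated, and
# the import path for the ONNX config is an assumption):
#
#     from transformers import AutoTokenizer, CodeGenConfig
#     from transformers.models.codegen.configuration_codegen import CodeGenOnnxConfig
#     onnx_config = CodeGenOnnxConfig(CodeGenConfig(), use_past=True)
#     dummy_inputs = onnx_config.generate_dummy_inputs(
#         AutoTokenizer.from_pretrained('''Salesforce/codegen-350M-mono'''),
#         framework=TensorType.PYTORCH,
#     )  # includes input_ids, past_key_values, and an extended attention_mask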
| 350 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
a_ : List[Any] = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
a_ : List[Any] = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
a_ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def lowercase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ), codebase_urls=['''https://www.atticusprojectai.org/cuad'''], reference_urls=['''https://www.atticusprojectai.org/cuad'''], )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ ={prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
lowerCamelCase_ =[
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
lowerCamelCase_ =evaluate(dataset=lowerCAmelCase, predictions=lowerCAmelCase )
return score
| 6 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def a_ ( __snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
def a_ ( __snake_case : str ) -> Any:
"""simple docstring"""
# word like '180' or '身高' or '神'
for char in word:
lowerCamelCase_ =ord(lowerCAmelCase__ )
if not _is_chinese_char(lowerCAmelCase__ ):
return 0
return 1
def a_ ( __snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =set()
for token in tokens:
lowerCamelCase_ =len(lowerCAmelCase__ ) > 1 and is_chinese(lowerCAmelCase__ )
if chinese_word:
word_set.add(lowerCAmelCase__ )
lowerCamelCase_ =list(lowerCAmelCase__ )
return word_list
def a_ ( __snake_case : List[str] , __snake_case : set() ) -> Union[str, Any]:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
lowerCamelCase_ =max([len(lowerCAmelCase__ ) for w in chinese_word_set] )
lowerCamelCase_ =bert_tokens
    lowerCamelCase_, lowerCamelCase_ =0, len(lowerCAmelCase__ )
while start < end:
lowerCamelCase_ =True
if is_chinese(bert_word[start] ):
lowerCamelCase_ =min(end - start , lowerCAmelCase__ )
for i in range(lowerCAmelCase__ , 1 , -1 ):
lowerCamelCase_ =''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
lowerCamelCase_ ='''##''' + bert_word[j]
lowerCamelCase_ =start + i
lowerCamelCase_ =False
break
if single_word:
start += 1
return bert_word
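# Worked example (illustrative, not from the original file): with
# bert_word = ["北", "京", "欢", "迎", "你"] and chinese_word_set = {"北京"},
# the greedy longest-match loop above rewrites the list to
# ["北", "##京", "欢", "迎", "你"], marking continuation characters with "##".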
def a_ ( __snake_case : List[str] , __snake_case : LTP , __snake_case : BertTokenizer ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =[]
for i in range(0 , len(lowerCAmelCase__ ) , 100 ):
lowerCamelCase_ =ltp_tokenizer.seg(lines[i : i + 100] )[0]
lowerCamelCase_ =[get_chinese_word(lowerCAmelCase__ ) for r in res]
ltp_res.extend(lowerCAmelCase__ )
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ )
lowerCamelCase_ =[]
for i in range(0 , len(lowerCAmelCase__ ) , 100 ):
lowerCamelCase_ =bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ )
lowerCamelCase_ =[]
for input_ids, chinese_word in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCamelCase_ =[]
for id in input_ids:
lowerCamelCase_ =bert_tokenizer._convert_id_to_token(lowerCAmelCase__ )
input_tokens.append(lowerCAmelCase__ )
lowerCamelCase_ =add_sub_symbol(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCamelCase_ =[]
        # We only save the positions of Chinese subwords starting with ##, which means they are part of a whole word.
for i, token in enumerate(lowerCAmelCase__ ):
if token[:2] == "##":
lowerCamelCase_ =token[2:]
# save chinese tokens' pos
if len(lowerCAmelCase__ ) == 1 and _is_chinese_char(ord(lowerCAmelCase__ ) ):
ref_id.append(lowerCAmelCase__ )
ref_ids.append(lowerCAmelCase__ )
assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ )
return ref_ids
def a_ ( __snake_case : str ) -> Optional[Any]:
"""simple docstring"""
    # For Chinese (Ro)BERT, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
lowerCamelCase_ =f.readlines()
lowerCamelCase_ =[line.strip() for line in data if len(lowerCAmelCase__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
lowerCamelCase_ =LTP(args.ltp ) # faster in GPU device
lowerCamelCase_ =BertTokenizer.from_pretrained(args.bert )
lowerCamelCase_ =prepare_ref(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
lowerCamelCase_ =[json.dumps(lowerCAmelCase__ ) + '''\n''' for ref in ref_ids]
f.writelines(lowerCAmelCase__ )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
a_ : int = parser.parse_args()
main(args)
| 351 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
a_ : Tuple = logging.get_logger(__name__)
a_ : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a_ : Tuple = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : Union[str, Any] = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : str = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : int = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_12,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_12,
}
a_ : List[Any] = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_12,
"""facebook/dpr-question_encoder-multiset-base""": 5_12,
}
a_ : Optional[Any] = {
"""facebook/dpr-reader-single-nq-base""": 5_12,
"""facebook/dpr-reader-multiset-base""": 5_12,
}
a_ : Optional[int] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
a_ : List[str] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
a_ : Dict = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[Any] =VOCAB_FILES_NAMES
lowercase : Any =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase : Dict =DPRContextEncoderTokenizer
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[int] =VOCAB_FILES_NAMES
lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase : List[Any] =DPRQuestionEncoderTokenizer
a_ : Union[str, Any] = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
a_ : Dict = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
a_ : Dict = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(lowerCamelCase__ )
class __UpperCamelCase :
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
elif titles is None or texts is None:
lowerCamelCase_ =titles if texts is None else texts
return super().__call__(
lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
lowerCamelCase_ =titles if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [titles]
lowerCamelCase_ =texts if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [texts]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =questions if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [questions] * n_passages
assert len(lowerCAmelCase ) == len(
            lowerCAmelCase ), f'''There should be as many titles as texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.'''
lowerCamelCase_ =super().__call__(lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ =super().__call__(lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ ={
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase, lowerCAmelCase )
]
}
if return_attention_mask is not False:
lowerCamelCase_ =[]
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCamelCase_ =attention_mask
return self.pad(lowerCAmelCase, padding=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 16, lowerCAmelCase = 64, lowerCAmelCase = 4, ):
"""simple docstring"""
lowerCamelCase_ =reader_input['''input_ids''']
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =reader_output[:3]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =sorted(range(lowerCAmelCase ), reverse=lowerCAmelCase, key=relevance_logits.__getitem__ )
lowerCamelCase_ =[]
for doc_id in sorted_docs:
lowerCamelCase_ =list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCamelCase_ =sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCamelCase_ =sequence_ids.index(self.pad_token_id )
else:
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCAmelCase, top_spans=lowerCAmelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCAmelCase, start_index=lowerCAmelCase, end_index=lowerCAmelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
if len(lowerCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =[]
for start_index, start_score in enumerate(lowerCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCamelCase_ =sorted(lowerCAmelCase, key=lambda lowerCAmelCase : x[1], reverse=lowerCAmelCase )
lowerCamelCase_ =[]
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
lowerCamelCase_ =end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCAmelCase ) == top_spans:
break
return chosen_span_intervals
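# (Behavior note: candidate spans are scored as start_logit + end_logit and
# sorted descending; any span overlapping an already accepted interval is
# skipped, so the returned intervals are mutually non-overlapping.)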
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
lowercase : int =VOCAB_FILES_NAMES
lowercase : Tuple =READER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =READER_PRETRAINED_INIT_CONFIGURATION
lowercase : int =['input_ids', 'attention_mask']
lowercase : Dict =DPRReaderTokenizer
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ : List[Any] = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
a_ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 352 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def a_ ( ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ ={
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
lowerCamelCase_ =Dataset.from_dict(__snake_case )
return dataset
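# (Note: the first two rows share the repeated-'a' content, so MinHash
# deduplication at the 0.85 Jaccard threshold used below groups them into a
# single duplicate cluster of size 2, which is what the tests assert.)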
class __UpperCamelCase ( lowerCamelCase__ ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =get_dataset()
lowerCamelCase_ =make_duplicate_clusters(lowerCAmelCase, 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ), 2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =get_dataset()
lowerCamelCase_, lowerCamelCase_ =deduplicate_dataset(lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ), 2 )
print(lowerCAmelCase )
self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], lowerCAmelCase )
| 6 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ : Tuple = logging.get_logger(__name__)
a_ : List[Any] = {"vocab_file": "sentencepiece.bpe.model"}
a_ : Tuple = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
a_ : List[Any] = {
"moussaKam/mbarthez": 10_24,
"moussaKam/barthez": 10_24,
"moussaKam/barthez-orangesum-title": 10_24,
}
a_ : Any = "▁"
class __UpperCamelCase ( lowercase_ ):
lowercase : List[Any] = VOCAB_FILES_NAMES
lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Tuple = ["input_ids", "attention_mask"]
def __init__( self, lowerCAmelCase, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="</s>", lowerCAmelCase="<s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<mask>", lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =AddedToken(a__, lstrip=a__, rstrip=a__ ) if isinstance(a__, a__ ) else mask_token
lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a__, eos_token=a__, unk_token=a__, sep_token=a__, cls_token=a__, pad_token=a__, mask_token=a__, sp_model_kwargs=self.sp_model_kwargs, **a__, )
lowerCamelCase_ =vocab_file
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a__ ) )
lowerCamelCase_ ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowerCamelCase_ =len(self.sp_model ) - 1
lowerCamelCase_ ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ =[self.cls_token_id]
lowerCamelCase_ =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
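    # (Layout note: BARThez uses the RoBERTa-style special-token format, i.e.
    #   single sequence:   <s> A </s>
    #   pair of sequences: <s> A </s></s> B </s>
    # which is exactly what the concatenation above builds.)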
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__, token_ids_a=a__, already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1, 1] + ([0] * len(a__ )) + [1]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
lowerCamelCase_ =[self.sep_token_id]
lowerCamelCase_ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase__ ( self ):
"""simple docstring"""
return len(self.sp_model )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return self.sp_model.encode(a__, out_type=a__ )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase_ =self.sp_model.PieceToId(a__ )
return spm_id if spm_id else self.unk_token_id
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(a__ )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =''''''
lowerCamelCase_ =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
lowerCamelCase_ =True
lowerCamelCase_ =[]
else:
current_sub_tokens.append(a__ )
lowerCamelCase_ =False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase_ =self.__dict__.copy()
lowerCamelCase_ =None
return state
def __setstate__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =d
# for backward compatibility
if not hasattr(self, '''sp_model_kwargs''' ):
lowerCamelCase_ ={}
lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(a__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase_ =os.path.join(
a__, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__, '''wb''' ) as fi:
lowerCamelCase_ =self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a_ : Any = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 6 | 0 |
'''simple docstring'''
a_ : List[Any] = 8.31_44_62 # Unit - J mol-1 K-1
def a_ ( __snake_case : int , __snake_case : List[str] , __snake_case : List[Any] ) -> int:
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def a_ ( __snake_case : str , __snake_case : Dict , __snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
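# Worked example (illustrative): for 1 mol at 300 K in a 1 m^3 vessel, the
# pressure function above gives 1 * 300 * 8.314462 / 1 ≈ 2494.34 Pa; plugging
# that pressure back into the volume function recovers 1 m^3.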
if __name__ == "__main__":
from doctest import testmod
testmod()
| 354 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def a_ ( __snake_case : int = 150_0000 ) -> int:
"""simple docstring"""
lowerCamelCase_ =defaultdict(__snake_case )
lowerCamelCase_ =2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __snake_case , 2 ):
if gcd(__snake_case , __snake_case ) > 1:
continue
lowerCamelCase_ =2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__snake_case , limit + 1 , __snake_case ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
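# Worked check (illustrative): Euclid's parameters m=2, n=1 give the primitive
# triple (3, 4, 5) with perimeter 2*m*(m+n) = 12; the inner loop then counts
# every multiple 12, 24, 36, ... up to the limit.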
if __name__ == "__main__":
print(F"""{solution() = }""")
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ : str = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = ['ConditionalDetrFeatureExtractor']
a_ : Union[str, Any] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
a_ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 355 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ : Tuple = 16
a_ : Optional[int] = 32
def a_ ( __snake_case : Accelerator , __snake_case : int = 16 ) -> str:
"""simple docstring"""
lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowerCamelCase_ =load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__snake_case : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ =datasets.map(
__snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ =tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__snake_case : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ =16
elif accelerator.mixed_precision != "no":
lowerCamelCase_ =8
else:
lowerCamelCase_ =None
return tokenizer.pad(
__snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowerCamelCase_ =DataLoader(
tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
lowerCamelCase_ =DataLoader(
tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ : Tuple = mocked_dataloaders # noqa: F811
def a_ ( __snake_case : List[str] , __snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __snake_case ) == "1":
lowerCamelCase_ =2
# Initialize accelerator
lowerCamelCase_ =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ =config['''lr''']
lowerCamelCase_ =int(config['''num_epochs'''] )
lowerCamelCase_ =int(config['''seed'''] )
lowerCamelCase_ =int(config['''batch_size'''] )
lowerCamelCase_ =evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
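    # (Behavior note, hedged: on a CUDA out-of-memory error the decorator halves
    # the batch size and re-runs the wrapped function, starting from
    # `starting_batch_size`, until training fits in memory.)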
@find_executable_batch_size(starting_batch_size=__snake_case )
def inner_training_loop(__snake_case : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ =model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ =AdamW(params=model.parameters() , lr=__snake_case )
lowerCamelCase_, lowerCamelCase_ =get_dataloaders(__snake_case , __snake_case )
# Instantiate scheduler
lowerCamelCase_ =get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.loss
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.logits.argmax(dim=-1 )
lowerCamelCase_, lowerCamelCase_ =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
lowerCamelCase_ =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __snake_case )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a_ ( ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
        ''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowerCamelCase_ =parser.parse_args()
lowerCamelCase_ ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
| 6 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowercase : str =field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True} )
lowercase : ClassVar[Features] =Features({'question': Value('string' ), 'context': Value('string' )} )
lowercase : ClassVar[Features] =Features(
{
'answers': Sequence(
{
'text': Value('string' ),
'answer_start': Value('int32' ),
} )
} )
lowercase : str ="question"
lowercase : str ="context"
lowercase : str ="answers"
@property
def lowercase__ ( self ):
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 356 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
a_ : List[str] = """src/diffusers"""
# Matches is_xxx_available()
a_ : int = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
a_ : List[str] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
a_ : Optional[Any] = """
{0} = None
"""
a_ : List[Any] = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
a_ : Optional[Any] = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def a_ ( __snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =_re_backend.findall(__snake_case )
if len(__snake_case ) == 0:
return None
return "_and_".join(__snake_case )
def a_ ( ) -> Optional[int]:
"""simple docstring"""
with open(os.path.join(__snake_case , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.readlines()
# Get to the point we do the actual imports for type checking
lowerCamelCase_ =0
lowerCamelCase_ ={}
# Go through the end of the file
while line_index < len(__snake_case ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowerCamelCase_ =find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('''else:''' ):
line_index += 1
line_index += 1
lowerCamelCase_ =[]
# Until we unindent, add backend objects to the list
while line_index < len(__snake_case ) and len(lines[line_index] ) > 1:
lowerCamelCase_ =lines[line_index]
lowerCamelCase_ =_re_single_line_import.search(__snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__snake_case ) > 0:
lowerCamelCase_ =objects
else:
line_index += 1
return backend_specific_objects
def a_ ( __snake_case : Dict , __snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(__snake_case )
elif name.islower():
return DUMMY_FUNCTION.format(__snake_case , __snake_case )
else:
return DUMMY_CLASS.format(__snake_case , __snake_case )
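# Illustrative dispatch (hypothetical names): an all-uppercase name such as
# "FLAX_WEIGHTS_NAME" gets the DUMMY_CONSTANT template, an all-lowercase name
# such as "load_tf_weights" gets DUMMY_FUNCTION, and a mixed-case class name
# falls through to DUMMY_CLASS.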
def a_ ( __snake_case : Tuple=None ) -> List[str]:
"""simple docstring"""
if backend_specific_objects is None:
lowerCamelCase_ =read_init()
    # For the special correspondence from backend to module name, as used in the requires_<modulename> checks
lowerCamelCase_ ={}
for backend, objects in backend_specific_objects.items():
lowerCamelCase_ ='''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']'''
lowerCamelCase_ ='''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__snake_case , __snake_case ) for o in objects] )
lowerCamelCase_ =dummy_file
return dummy_files
def a_ ( __snake_case : Dict=False ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
lowerCamelCase_ ={'''torch''': '''pt'''}
# Locate actual dummy modules and read their content.
lowerCamelCase_ =os.path.join(__snake_case , '''utils''' )
lowerCamelCase_ ={
backend: os.path.join(__snake_case , F'''dummy_{short_names.get(__snake_case , __snake_case )}_objects.py''' )
for backend in dummy_files.keys()
}
lowerCamelCase_ ={}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__snake_case ):
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.read()
else:
lowerCamelCase_ =''''''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py as the main '''
'''__init__ has new objects.''' )
with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'''The main __init__ has objects that are not present in '''
F'''diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py. Run `make fix-copies` '''
'''to fix this.''' )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
a_ : Tuple = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 6 | 0 |
'''simple docstring'''
import string
import numpy
def a_ ( __snake_case : Any , __snake_case : str ) -> List[str]:
"""simple docstring"""
return b if a == 0 else greatest_common_divisor(b % a , _A )
class __UpperCamelCase :
lowercase : Dict =string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
lowercase : Union[str, Any] =numpy.vectorize(lambda lowerCamelCase__ : x % 36 )
lowercase : List[Any] =numpy.vectorize(lowerCamelCase__ )
def __init__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.modulus(__snake_case ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
lowerCamelCase_ =encrypt_key.shape[0]
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return self.key_string.index(__snake_case )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return self.key_string[round(__snake_case )]
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowerCamelCase_ =det % len(self.key_string )
lowerCamelCase_ =len(self.key_string )
if greatest_common_divisor(__snake_case, len(self.key_string ) ) != 1:
lowerCamelCase_ =(
f'''determinant modular {req_l} of encryption key({det}) '''
f'''is not co prime w.r.t {req_l}.\nTry another key.'''
)
raise ValueError(__snake_case )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[char for char in text.upper() if char in self.key_string]
lowerCamelCase_ =chars[-1]
while len(__snake_case ) % self.break_key != 0:
chars.append(__snake_case )
return "".join(__snake_case )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.process_text(text.upper() )
lowerCamelCase_ =''
for i in range(0, len(__snake_case ) - self.break_key + 1, self.break_key ):
lowerCamelCase_ =text[i : i + self.break_key]
lowerCamelCase_ =[self.replace_letters(__snake_case ) for char in batch]
lowerCamelCase_ =numpy.array([vec] ).T
lowerCamelCase_ =self.modulus(self.encrypt_key.dot(__snake_case ) ).T.tolist()[
0
]
lowerCamelCase_ =''.join(
self.replace_digits(__snake_case ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowerCamelCase_ =det % len(self.key_string )
lowerCamelCase_ =None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
lowerCamelCase_ =i
break
lowerCamelCase_ =(
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(__snake_case ) )
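    # (Note: the loop above brute-forces det_inv, the multiplicative inverse of
    # the key determinant modulo 36; the decrypt key is det_inv * det * K^-1
    # reduced mod 36, so decrypt() inverts encrypt() exactly.)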
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.make_decrypt_key()
lowerCamelCase_ =self.process_text(text.upper() )
lowerCamelCase_ =''
for i in range(0, len(__snake_case ) - self.break_key + 1, self.break_key ):
lowerCamelCase_ =text[i : i + self.break_key]
lowerCamelCase_ =[self.replace_letters(__snake_case ) for char in batch]
lowerCamelCase_ =numpy.array([vec] ).T
lowerCamelCase_ =self.modulus(decrypt_key.dot(__snake_case ) ).T.tolist()[0]
lowerCamelCase_ =''.join(
self.replace_digits(__snake_case ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
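# Hedged usage sketch (key values are illustrative, not from the original):
#   hc = HillCipher(numpy.array([[2, 5], [1, 6]]))   # det = 7, coprime with 36
#   ct = hc.encrypt("HELLO")   # padded to "HELLOO" (break_key = 2)
#   hc.decrypt(ct)             # -> "HELLOO"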
def a_ ( ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =int(input('''Enter the order of the encryption key: ''' ) )
lowerCamelCase_ =[]
print('''Enter each row of the encryption key with space separated integers''' )
for _ in range(_A ):
lowerCamelCase_ =[int(_A ) for x in input().split()]
hill_matrix.append(_A )
lowerCamelCase_ =HillCipher(numpy.array(_A ) )
print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
lowerCamelCase_ =input('''\n1. Encrypt\n2. Decrypt\n''' )
if option == "1":
lowerCamelCase_ =input('''What text would you like to encrypt?: ''' )
print('''Your encrypted text is:''' )
print(hc.encrypt(_A ) )
elif option == "2":
lowerCamelCase_ =input('''What text would you like to decrypt?: ''' )
print('''Your decrypted text is:''' )
print(hc.decrypt(_A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 357 |
'''simple docstring'''
a_ : List[Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def a_ ( __snake_case : int ) -> int:
"""simple docstring"""
lowerCamelCase_ =0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
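# Worked example (illustrative): 1234567 is consumed five digits at a time,
# so the lookup adds DIGITS_SQUARED[34567] and then DIGITS_SQUARED[12],
# i.e. (3**2 + 4**2 + 5**2 + 6**2 + 7**2) + (1**2 + 2**2) = 135 + 5 = 140.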
# Two chains are formed:
# one ends in 89; declaring its member 58 first minimizes the number of
# iterations needed to check all of the members.
# The other ends in 1 and contains only the single element 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution
a_ : list[bool | None] = [None] * 10_00_00_00
a_ : List[Any] = True
a_ : Optional[Any] = False
def a_ ( __snake_case : int ) -> bool:
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowerCamelCase_ =chain(next_number(__snake_case ) )
lowerCamelCase_ =number_chain
while number < 1000_0000:
lowerCamelCase_ =number_chain
number *= 10
return number_chain
def a_ ( __snake_case : int = 1000_0000 ) -> int:
"""simple docstring"""
for i in range(1 , __snake_case ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__snake_case )
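# Worked chains (illustrative): 44 -> 32 -> 13 -> 10 -> 1 settles at 1, while
# 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 loops at 89; every
# starting number below the limit ends up in exactly one of these two chains.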
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 6 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
a_ : Tuple = pd.read_csv("""sample_data.csv""", header=None)
a_ : List[Any] = df.shape[:1][0]
    # If you're using some other dataset, put its target column here
a_ : List[str] = df.iloc[:, 1:2]
a_ : Optional[int] = actual_data.values.reshape(len_data, 1)
a_ : Any = MinMaxScaler().fit_transform(actual_data)
a_ : Any = 10
a_ : List[Any] = 5
a_ : str = 20
a_ : List[str] = len_data - periods * look_back
a_ : List[str] = actual_data[:division]
a_ : int = actual_data[division - look_back :]
a_ : Optional[int] = [], []
a_ : Union[str, Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
a_ : Union[str, Any] = np.array(train_x)
a_ : Tuple = np.array(test_x)
a_ : Optional[int] = np.array([list(i.ravel()) for i in train_y])
a_ : Dict = np.array([list(i.ravel()) for i in test_y])
a_ : Dict = Sequential()
model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(1_28, 1)))
model.add(Dense(forward_days))
model.compile(loss="""mean_squared_error""", optimizer="""adam""")
a_ : Optional[Any] = model.fit(
x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
)
a_ : List[Any] = model.predict(x_test)
| 358 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a_ ( __snake_case : Tuple ) -> str:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __UpperCamelCase ( lowerCamelCase__ ):
@staticmethod
def lowercase__ ( lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''', type=lowerCAmelCase, default=lowerCAmelCase, help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''', action='''store_true''', help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''', action='''store_true''', help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''', )
download_parser.add_argument('''model''', type=lowerCAmelCase, help='''Name of the model to download''' )
download_parser.set_defaults(func=lowerCAmelCase )
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =model
lowerCamelCase_ =cache
lowerCamelCase_ =force
lowerCamelCase_ =trust_remote_code
def lowercase__ ( self ):
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
| 6 | 0 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a_ ( __snake_case : Union[str, Any] , __snake_case : Any ) -> List[str]:
"""simple docstring"""
assert isinstance(__snake_case , __snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def a_ ( __snake_case : str , __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =tmp_path / '''cache'''
lowerCamelCase_ ={'''text''': '''string'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase_ =TextDatasetReader(__snake_case , cache_dir=__snake_case , keep_in_memory=__snake_case ).read()
_check_text_dataset(__snake_case , __snake_case )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''text''': '''string'''},
{'''text''': '''int32'''},
{'''text''': '''float32'''},
] , )
def a_ ( __snake_case : Dict , __snake_case : Any , __snake_case : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =tmp_path / '''cache'''
lowerCamelCase_ ={'''text''': '''string'''}
lowerCamelCase_ =features.copy() if features else default_expected_features
lowerCamelCase_ =(
Features({feature: Value(__snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase_ =TextDatasetReader(__snake_case , features=__snake_case , cache_dir=__snake_case ).read()
_check_text_dataset(__snake_case , __snake_case )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def a_ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Dict ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =tmp_path / '''cache'''
lowerCamelCase_ ={'''text''': '''string'''}
lowerCamelCase_ =TextDatasetReader(__snake_case , cache_dir=__snake_case , split=__snake_case ).read()
_check_text_dataset(__snake_case , __snake_case )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def a_ ( __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
if issubclass(__snake_case , __snake_case ):
lowerCamelCase_ =text_path
elif issubclass(__snake_case , __snake_case ):
lowerCamelCase_ =[text_path]
lowerCamelCase_ =tmp_path / '''cache'''
lowerCamelCase_ ={'''text''': '''string'''}
lowerCamelCase_ =TextDatasetReader(__snake_case , cache_dir=__snake_case ).read()
_check_text_dataset(__snake_case , __snake_case )
def a_ ( __snake_case : List[str] , __snake_case : int , __snake_case : str=("train",) ) -> Dict:
"""simple docstring"""
assert isinstance(__snake_case , __snake_case )
for split in splits:
lowerCamelCase_ =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # "text" is the only column, and its default dtype is "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 359 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)
    def __getitem__(self, i):
        # Convert a single SquadFeatures example to a dict of tensors
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
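# Minimal usage sketch (illustrative; assumes a SQuAD-formatted `data_dir` and a
# pretrained tokenizer):
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="squad_data")
#   dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   batch = dataset[0]  # dict of tensors for a question-answering forward pass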
| 6 | 0 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the rolling hash: https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
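# Illustrative roll: with p_len == 2, moving the window from "ca" to "ab" in "cab"
# subtracts ord("c") * modulus_power, multiplies by alphabet_size, and adds ord("b"),
# all modulo `modulus` -- O(1) per shift instead of rehashing the whole window.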
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
    test_rabin_karp()
| 360 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = """https://openaipublic.azureedge.net/jukebox/models/"""
MODEL_MAPPING = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key
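# Illustrative rename (hypothetical key): "encoders.0.level_blocks.0.model.0.model.0.model.1.bias"
# -> "encoders.0.level_blocks.0.model.0.model.0.conv1d_1.bias"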
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the original Jukebox checkpoints and convert them into a `JukeboxModel` state dict."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 6 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""vinai/phobert-base""": 2_56,
"""vinai/phobert-large""": 2_56,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a sequence of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
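# e.g. get_pairs(("l", "o", "w")) -> {("l", "o"), ("o", "w")}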
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
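    # Illustrative, with a hypothetical merge table: self.bpe("xinchao") might return
    # "xin@@ chao" -- BPE subwords joined by "@@ ", with the trailing "</w>" stripped.
    # Results are memoized in self.cache.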
    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
| 361 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain `n` digits."""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
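# Sanity check: 144 = F(12) is the first Fibonacci number with 3 digits, so solution(3) == 12.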
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 6 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 362 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    """Return the dotted module path for a `tests/models/...` test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
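# e.g. get_module_path("tests/models/bert/test_modeling_bert.py") -> "tests.models.bert.test_modeling_bert"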
def get_test_module(test_file):
    """Import and return the test module of `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    """Return all `*ModelTester` classes defined in the test module of `test_file`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """Return all test classes in `test_file` that declare a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    """Return all model classes that appear in `all_model_classes` of the test classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Return the model tester class used by an instance of `test_class`, if any."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in `test_file` that target `model_class`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    """Return the tester classes of the test classes in `test_file` that target `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """Return a mapping from test classes to their model tester classes."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    """Return a mapping from model classes to the test classes that target them."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    """Return a mapping from model classes to the tester classes of the tests that target them."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """Recursively convert classes (and containers of classes) to JSON-serializable values."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
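# Illustrative (with hypothetical classes): to_json({BertModelTest: [BertModelTester]})
# -> {"BertModelTest": ["BertModelTester"]}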
| 6 | 0 |
'''simple docstring'''
__version__ = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 363 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):  # class name assumed per transformers' dummy speech objects
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
class Speech2TextFeatureExtractor(metaclass=DummyObject):  # class name assumed per transformers' dummy speech objects
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 6 | 0 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
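    # The scheduler-variant tests below all reuse these dummy inputs; only the
    # scheduler attached to the pipeline differs between them.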
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 364 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility with the target-processor context manager
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a sequence of `<s_key>...</s_key>` tokens into an ordered JSON-like structure."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
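    # Illustrative (with hypothetical tags): token2json("<s_menu><s_name>latte</s_name></s_menu>")
    # -> {"menu": {"name": "latte"}}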
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
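

# --- Illustrative sketch (not part of the processor above) -------------------
# The token-to-JSON method above walks paired <s_key>...</s_key> tags and splits
# leaf values on <sep/>. The standalone function below is a minimal, one-level
# (non-recursive) sketch of that idea; the tag names in the usage comment are
# made up for illustration and do not come from any real model vocabulary.
import re


def demo_token_to_json(tokens: str) -> dict:
    """Parse one flat level of <s_key>value</s_key> tags into a dict."""
    output = {}
    for match in re.finditer(r"<s_(.*?)>(.*?)</s_\1>", tokens, re.IGNORECASE):
        key, content = match.group(1), match.group(2)
        leaves = [leaf.strip() for leaf in content.split("<sep/>")]
        output[key] = leaves[0] if len(leaves) == 1 else leaves
    return output


# demo_token_to_json("<s_name>latte<sep/>mocha</s_name><s_total>5.00</s_total>")
# -> {"name": ["latte", "mocha"], "total": "5.00"}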
| 6 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class __UpperCamelCase :
lowercase : Union[str, Any] =None
lowercase : int =False
lowercase : str =False
lowercase : List[Any] =False
lowercase : Dict =None
lowercase : List[Any] =None
lowercase : Any =False
lowercase : List[Any] =False
lowercase : int =False
lowercase : Tuple =True
lowercase : Union[str, Any] =None
lowercase : Optional[Any] =1
lowercase : List[Any] =None
lowercase : List[str] =False
lowercase : Tuple =None
lowercase : str =None
def lowercase__ ( self ):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(lowerCAmelCase ) for k, v in self.__dict__.items()} )
| 365 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =ShapEImgaImgPipeline
lowercase : Dict =['image']
lowercase : str =['image']
lowercase : int =[
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowercase : int =False
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self ):
"""simple docstring"""
return 8
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
lowerCamelCase_ =CLIPVisionModel(lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =CLIPImageProcessor(
crop_size=224, do_center_crop=lowerCAmelCase, do_normalize=lowerCAmelCase, do_resize=lowerCAmelCase, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=224, )
return image_processor
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowerCamelCase_ =PriorTransformer(**lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowerCamelCase_ =ShapERenderer(**lowerCAmelCase )
return model
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.dummy_prior
lowerCamelCase_ =self.dummy_image_encoder
lowerCamelCase_ =self.dummy_image_processor
lowerCamelCase_ =self.dummy_renderer
lowerCamelCase_ =HeunDiscreteScheduler(
beta_schedule='''exp''', num_train_timesteps=1_024, prediction_type='''sample''', use_karras_sigmas=lowerCAmelCase, clip_sample=lowerCAmelCase, clip_sample_range=1.0, )
lowerCamelCase_ ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
if str(lowerCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
else:
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCamelCase_ ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu'''
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =pipe(**self.get_dummy_inputs(lowerCAmelCase ) )
lowerCamelCase_ =output.images[0]
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowerCamelCase_ =np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =torch_device == '''cpu'''
lowerCamelCase_ =True
self._test_inference_batch_single_identical(
batch_size=2, test_max_difference=lowerCAmelCase, relax_max_difference=lowerCAmelCase, )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =1
lowerCamelCase_ =2
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
lowerCamelCase_ =batch_size * [inputs[key]]
lowerCamelCase_ =pipe(**lowerCAmelCase, num_images_per_prompt=lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowerCamelCase_ =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowerCamelCase_ =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
lowerCamelCase_ =pipe(
lowerCAmelCase, generator=lowerCAmelCase, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='''np''', ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )
| 6 | 0 |
from torch import nn
class __UpperCamelCase ( nn.Module ):
def __init__( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
super().__init__()
lowerCamelCase_ =class_size
lowerCamelCase_ =embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
lowerCamelCase_ =nn.Linear(__lowercase, __lowercase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.mlp(__lowercase )
return logits
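

# Hedged usage sketch for the head above: it is a single linear layer mapping
# pooled embeddings of size `embed_size` to `class_size` logits. The sizes
# below are illustrative assumptions, not values taken from the source.
import torch

if __name__ == "__main__":
    head = nn.Linear(768, 5)        # embed_size=768 -> class_size=5
    hidden = torch.randn(4, 768)    # batch of 4 pooled hidden states
    logits = head(hidden)
    print(logits.shape)             # torch.Size([4, 5])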
| 366 |
'''simple docstring'''
from itertools import product
def a_ ( __snake_case : int , __snake_case : int ) -> list[int]:
"""simple docstring"""
lowerCamelCase_ =sides_number
lowerCamelCase_ =max_face_number * dice_number
lowerCamelCase_ =[0] * (max_total + 1)
lowerCamelCase_ =1
lowerCamelCase_ =range(__snake_case , max_face_number + 1 )
for dice_numbers in product(__snake_case , repeat=__snake_case ):
lowerCamelCase_ =sum(__snake_case )
totals_frequencies[total] += 1
return totals_frequencies
def a_ ( ) -> float:
"""simple docstring"""
lowerCamelCase_ =total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowerCamelCase_ =total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowerCamelCase_ =0
lowerCamelCase_ =9
lowerCamelCase_ =4 * 9
lowerCamelCase_ =6
for peter_total in range(__snake_case , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowerCamelCase_ =(4**9) * (6**6)
lowerCamelCase_ =peter_wins_count / total_games_number
lowerCamelCase_ =round(__snake_case , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a_ : Tuple = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
a_ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
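# Consumption sketch (assumption: the standard transformers lazy-import
# pattern). Attribute access on the module triggers the deferred import, e.g.
#
#   from transformers.models.roberta_prelayernorm import RobertaPreLayerNormConfig
#   config = RobertaPreLayerNormConfig()
#
# pulls in `configuration_roberta_prelayernorm` only at that point.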
| 367 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
a_ : Tuple = TypeVar("""T""")
a_ : Dict = Union[List[T], Tuple[T, ...]]
a_ : int = Union[T, List[T], Dict[str, T]]
a_ : Optional[Any] = Union[str, bytes, os.PathLike]
| 6 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class __UpperCamelCase :
def __init__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =data
lowerCamelCase_ =None
class __UpperCamelCase :
def __init__( self ):
"""simple docstring"""
lowerCamelCase_ =None
lowerCamelCase_ =None
def __iter__( self ):
"""simple docstring"""
lowerCamelCase_ =self.head
while self.head:
yield node.data
lowerCamelCase_ =node.next
if node == self.head:
break
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ):
"""simple docstring"""
return "->".join(str(_UpperCAmelCase ) for item in iter(self ) )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
self.insert_nth(len(self ), _UpperCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
self.insert_nth(0, _UpperCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
if index < 0 or index > len(self ):
raise IndexError('''list index out of range.''' )
lowerCamelCase_ =Node(_UpperCAmelCase )
if self.head is None:
lowerCamelCase_ =new_node # first node points itself
lowerCamelCase_ =new_node
elif index == 0: # insert at head
lowerCamelCase_ =self.head
lowerCamelCase_ =new_node
else:
lowerCamelCase_ =self.head
for _ in range(index - 1 ):
lowerCamelCase_ =temp.next
lowerCamelCase_ =temp.next
lowerCamelCase_ =new_node
if index == len(self ) - 1: # insert at tail
lowerCamelCase_ =new_node
def lowercase__ ( self ):
"""simple docstring"""
return self.delete_nth(0 )
def lowercase__ ( self ):
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def lowercase__ ( self, lowerCAmelCase = 0 ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise IndexError('''list index out of range.''' )
lowerCamelCase_ =self.head
if self.head == self.tail: # just one node
lowerCamelCase_ =None
elif index == 0: # delete head node
lowerCamelCase_ =self.tail.next.next
lowerCamelCase_ =self.head.next
else:
lowerCamelCase_ =self.head
for _ in range(index - 1 ):
lowerCamelCase_ =temp.next
lowerCamelCase_ =temp.next
lowerCamelCase_ =temp.next.next
if index == len(self ) - 1: # delete at tail
lowerCamelCase_ =temp
return delete_node.data
def lowercase__ ( self ):
"""simple docstring"""
return len(self ) == 0
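

# Structure sketch (standard circular singly linked list): the tail's `next`
# always points back at the head, which is why `__iter__` stops once the walk
# returns to `self.head`.
#
#   head -> [0] -> [1] -> [2] --+
#    ^                          |
#    +--------------------------+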
def a_ ( ) -> None:
"""simple docstring"""
lowerCamelCase_ =CircularLinkedList()
assert len(lowerCAmelCase_ ) == 0
assert circular_linked_list.is_empty() is True
assert str(lowerCAmelCase_ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(lowerCAmelCase_ ) == i
circular_linked_list.insert_nth(lowerCAmelCase_ , i + 1 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def a_ ( __snake_case : str , __snake_case : bool = True , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : bool = False , __snake_case : float = 100 , __snake_case : float = 0.0_1 , __snake_case : float = 1 , ) -> Any:
"""simple docstring"""
lowerCamelCase_ =False
lowerCamelCase_ =search_prob
lowerCamelCase_ =start_temperate
lowerCamelCase_ =[]
lowerCamelCase_ =0
lowerCamelCase_ =None
while not search_end:
lowerCamelCase_ =current_state.score()
if best_state is None or current_score > best_state.score():
lowerCamelCase_ =current_state
scores.append(__snake_case )
iterations += 1
lowerCamelCase_ =None
lowerCamelCase_ =current_state.get_neighbors()
while (
next_state is None and neighbors
        ): # until we find a neighbor that we can move to
lowerCamelCase_ =random.randint(0 , len(__snake_case ) - 1 ) # picking a random neighbor
lowerCamelCase_ =neighbors.pop(__snake_case )
lowerCamelCase_ =picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCamelCase_ =change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCamelCase_ =picked_neighbor
else:
lowerCamelCase_ =(math.e) ** (
change / current_temp
) # probability generation function
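                # Worked check (illustrative numbers): with change = -2 and
                # current_temp = 100, probability = e**(-0.02) ≈ 0.980, so a
                # slightly worse neighbor is almost always accepted early on;
                # at current_temp = 1 the same move gives e**(-2) ≈ 0.135
                # and is usually rejected.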
if random.random() < probability: # random number within probability
lowerCamelCase_ =picked_neighbor
lowerCamelCase_ =current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCamelCase_ =True
else:
lowerCamelCase_ =next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__snake_case ) , __snake_case )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def a_ ( __snake_case : List[str] , __snake_case : Optional[int] ) -> str:
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : List[str] = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def a_ ( __snake_case : Dict , __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return (3 * x**2) - (6 * y)
a_ : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
a_ : Dict = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 6 | 0 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
a_ : Optional[int] = getLogger(__name__)
a_ : Union[str, Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
def a_ ( __snake_case : Tuple , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : Optional[Any] = 8 , __snake_case : Union[str, Any] = DEFAULT_DEVICE , __snake_case : Dict=False , __snake_case : Dict="summarization" , __snake_case : Tuple=None , **__snake_case : Optional[Any] , ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =Path(__SCREAMING_SNAKE_CASE ).open('''w''' , encoding='''utf-8''' )
lowerCamelCase_ =str(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ =AutoModelForSeqaSeqLM.from_pretrained(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
if fpaa:
lowerCamelCase_ =model.half()
lowerCamelCase_ =AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
lowerCamelCase_ =time.time()
# update config with task specific params
use_task_specific_params(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if prefix is None:
lowerCamelCase_ =prefix or getattr(model.config , '''prefix''' , '''''' ) or ""
for examples_chunk in tqdm(list(chunks(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) ):
lowerCamelCase_ =[prefix + text for text in examples_chunk]
lowerCamelCase_ =tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , truncation=__SCREAMING_SNAKE_CASE , padding='''longest''' ).to(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ =model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **__SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
lowerCamelCase_ =int(time.time() - start_time ) # seconds
lowerCamelCase_ =len(__SCREAMING_SNAKE_CASE )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def a_ ( ) -> List[Any]:
"""simple docstring"""
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def a_ ( __snake_case : Optional[Any]=True ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=__SCREAMING_SNAKE_CASE , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=__SCREAMING_SNAKE_CASE , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=__SCREAMING_SNAKE_CASE , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
        '''--prefix''' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help='''will be added to the beginning of src examples''' )
parser.add_argument('''--task''' , type=__SCREAMING_SNAKE_CASE , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=__SCREAMING_SNAKE_CASE , default=8 , required=__SCREAMING_SNAKE_CASE , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=__SCREAMING_SNAKE_CASE , default=-1 , required=__SCREAMING_SNAKE_CASE , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=__SCREAMING_SNAKE_CASE , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
lowerCamelCase_ =parser.parse_known_args()
lowerCamelCase_ =parse_numeric_n_bool_cl_kwargs(__SCREAMING_SNAKE_CASE )
if parsed_args and verbose:
print(F'''parsed the following generate kwargs: {parsed_args}''' )
lowerCamelCase_ =[" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
lowerCamelCase_ =examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
lowerCamelCase_ =generate_summaries_or_translations(
__SCREAMING_SNAKE_CASE , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **__SCREAMING_SNAKE_CASE , )
if args.reference_path is None:
return {}
# Compute scores
lowerCamelCase_ =calculate_bleu if "translation" in args.task else calculate_rouge
lowerCamelCase_ =[x.rstrip() for x in open(args.save_path ).readlines()]
lowerCamelCase_ =[x.rstrip() for x in open(args.reference_path ).readlines()][: len(__SCREAMING_SNAKE_CASE )]
lowerCamelCase_ =score_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
scores.update(__SCREAMING_SNAKE_CASE )
if args.dump_args:
scores.update(__SCREAMING_SNAKE_CASE )
if args.info:
lowerCamelCase_ =args.info
if verbose:
print(__SCREAMING_SNAKE_CASE )
if args.score_path is not None:
json.dump(__SCREAMING_SNAKE_CASE , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
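    # Illustrative summarization run (model id and paths are assumptions, not
    # taken from this repo):
    # python run_eval.py sshleifer/distilbart-cnn-12-6 cnn_dm/test.source preds.txt \
    #     --reference_path cnn_dm/test.target --score_path rouge.json --bs 16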
| 369 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a_ ( __snake_case : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =[
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__snake_case , __snake_case )
def a_ ( __snake_case : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =emb.weight.shape
lowerCamelCase_ =nn.Linear(__snake_case , __snake_case , bias=__snake_case )
lowerCamelCase_ =emb.weight.data
return lin_layer
def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple=None ) -> Dict:
"""simple docstring"""
lowerCamelCase_ ={}
for old_key in state_dict.keys():
lowerCamelCase_ =old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowerCamelCase_ =key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' )
else:
lowerCamelCase_ =key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
if "gate" in key:
lowerCamelCase_ =key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
if "fc2" and "experts" not in key:
lowerCamelCase_ =key.replace('''.fc2.''' , '''.ffn.fc2.''' )
if "fc1" and "experts" not in key:
lowerCamelCase_ =key.replace('''.fc1.''' , '''.ffn.fc1.''' )
if ".encoder_attn." in key:
lowerCamelCase_ =key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
lowerCamelCase_ =key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
lowerCamelCase_ =key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
lowerCamelCase_ =state_dict[old_key]
return new_dict
def a_ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : str = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =0
os.makedirs(__snake_case , exist_ok=__snake_case )
for expert in range(__snake_case ):
lowerCamelCase_ =switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(__snake_case ):
lowerCamelCase_ =torch.load(__snake_case )['''model''']
remove_ignore_keys_(__snake_case )
lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
lowerCamelCase_ =os.path.join(
__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
torch.save(__snake_case , __snake_case )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__snake_case )[0]].dtype )
# Add the last block
lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
lowerCamelCase_ =torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(__snake_case )
lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
lowerCamelCase_ =shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__snake_case ) == 1:
lowerCamelCase_ =os.path.join(__snake_case , __snake_case )
torch.save(__snake_case , __snake_case )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__snake_case , __snake_case )
# Otherwise, let's build the index
lowerCamelCase_ ={}
for idx, shard in enumerate(__snake_case ):
lowerCamelCase_ =weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(__snake_case ):05d}.bin''' )
lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) )
for key in shard:
lowerCamelCase_ =shard_file
# Add the metadata
lowerCamelCase_ ={'''total_size''': total_size}
lowerCamelCase_ ={'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(__snake_case , __snake_case ) , '''w''' , encoding='''utf-8''' ) as f:
lowerCamelCase_ =json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n'''
f.write(__snake_case )
return metadata, index
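# Shape of the index file written above (key names and values are illustrative
# only; 128 experts plus the shared weights give 129 shards):
# {
#     "metadata": {"total_size": 123456789},
#     "weight_map": {
#         "ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00129.bin",
#         "shared.weight": "pytorch_model-00129-of-00129.bin"
#     }
# }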
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
a_ : Tuple = parser.parse_args()
a_ , a_ : int = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
a_ : Tuple = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
a_ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
| 6 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
a_ : Optional[Any] = logging.get_logger(__name__)
class __UpperCamelCase ( lowerCamelCase_ ):
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''', __snake_case, )
super().__init__(*__snake_case, **__snake_case )
| 370 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : int =['image_processor', 'tokenizer']
lowercase : int ='LayoutLMv2ImageProcessor'
lowercase : Any =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = True, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=lowerCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =[text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase_ =features['''words''']
lowerCamelCase_ =self.tokenizer(
text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, )
# add pixel values
lowerCamelCase_ =features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowerCamelCase_ =self.get_overflowing_images(lowerCAmelCase, encoded_inputs['''overflow_to_sample_mapping'''] )
lowerCamelCase_ =images
return encoded_inputs
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(lowerCAmelCase )} and {len(lowerCAmelCase )}''' )
return images_with_overflow
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
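

# Hedged usage sketch (checkpoint id and file name are assumptions):
#
#   from transformers import LayoutXLMProcessor
#   from PIL import Image
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")  # apply_ocr=True path
#   # encoding keys: input_ids, bbox, attention_mask, image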
| 6 | 0 |
'''simple docstring'''
def a_ ( __snake_case : Union[str, Any] ) -> bool:
"""simple docstring"""
return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6''') )
def a_ ( __snake_case : List[Any] ) -> bool:
"""simple docstring"""
lowerCamelCase_ =credit_card_number
lowerCamelCase_ =0
lowerCamelCase_ =len(__snake_case ) - 2
for i in range(__snake_case , -1 , -2 ):
# double the value of every second digit
lowerCamelCase_ =int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
lowerCamelCase_ =cc_number[:i] + str(__snake_case ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(__snake_case ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
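# Worked check on "4111111111111111": doubling every second digit from the
# right turns seven of the 1s into 2s (contributing 14) and the leading 4 into
# 8; the remaining eight 1s contribute 8, so the total is 14 + 8 + 8 = 30 and
# 30 % 10 == 0 -> the number passes the Luhn check.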
def a_ ( __snake_case : Union[str, Any] ) -> bool:
"""simple docstring"""
lowerCamelCase_ =F'''{credit_card_number} is an invalid credit card number because'''
if not credit_card_number.isdigit():
print(F'''{error_message} it has nonnumerical characters.''' )
return False
if not 13 <= len(__snake_case ) <= 16:
print(F'''{error_message} of its length.''' )
return False
if not validate_initial_digits(__snake_case ):
print(F'''{error_message} of its first two digits.''' )
return False
if not luhn_validation(__snake_case ):
print(F'''{error_message} it fails the Luhn check.''' )
return False
print(F'''{credit_card_number} is a valid credit card number.''' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 371 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =VQModel
lowercase : Union[str, Any] ='sample'
@property
def lowercase__ ( self, lowerCAmelCase=(32, 32) ):
"""simple docstring"""
lowerCamelCase_ =4
lowerCamelCase_ =3
lowerCamelCase_ =floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
return {"sample": image}
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
lowerCamelCase_ =self.dummy_input
return init_dict, inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''', output_loading_info=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ), 0 )
model.to(lowerCAmelCase )
lowerCamelCase_ =model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(lowerCAmelCase ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
lowerCamelCase_ =torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size )
lowerCamelCase_ =image.to(lowerCAmelCase )
with torch.no_grad():
lowerCamelCase_ =model(lowerCAmelCase ).sample
lowerCamelCase_ =output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCamelCase_ =torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3] )
# fmt: on
self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) )
| 6 | 0 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=16, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=4, ):
"""simple docstring"""
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =seq_length
lowerCamelCase_ =is_training
lowerCamelCase_ =use_attention_mask
lowerCamelCase_ =use_token_type_ids
lowerCamelCase_ =use_labels
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_act
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =type_sequence_label_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =num_choices
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase_ =None
if self.use_attention_mask:
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ =None
if self.use_token_type_ids:
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase_ =RobertaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCAmelCase, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.prepare_config_and_inputs()
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =config_and_inputs
lowerCamelCase_ ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.prepare_config_and_inputs()
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =config_and_inputs
lowerCamelCase_ =True
lowerCamelCase_ =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
lowercase : Tuple =True
lowercase : List[Any] =(
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =FlaxRobertaModelTester(self )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowerCamelCase_ =model_class_name.from_pretrained('''roberta-base''', from_pt=lowerCAmelCase )
lowerCamelCase_ =model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase )
| 350 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
a_ : List[Any] = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
a_ : List[Any] = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
a_ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def lowercase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ), codebase_urls=['''https://www.atticusprojectai.org/cuad'''], reference_urls=['''https://www.atticusprojectai.org/cuad'''], )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ ={prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
lowerCamelCase_ =[
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
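        # The list built above reshapes the flat references into the
        # SQuAD-style nesting that the CUAD `evaluate` script expects
        # (illustrative):
        # [{"paragraphs": [{"qas": [{"id": "...", "answers": [{"text": "..."}]}]}]}]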
lowerCamelCase_ =evaluate(dataset=lowerCAmelCase, predictions=lowerCAmelCase )
return score
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : Optional[Any] = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
a_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 351 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
a_ : Tuple = logging.get_logger(__name__)
a_ : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a_ : Tuple = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : Union[str, Any] = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : str = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : int = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_12,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_12,
}
a_ : List[Any] = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_12,
"""facebook/dpr-question_encoder-multiset-base""": 5_12,
}
a_ : Optional[Any] = {
"""facebook/dpr-reader-single-nq-base""": 5_12,
"""facebook/dpr-reader-multiset-base""": 5_12,
}
a_ : Optional[int] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
a_ : List[str] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
a_ : Dict = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[Any] =VOCAB_FILES_NAMES
lowercase : Any =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase : Dict =DPRContextEncoderTokenizer
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[int] =VOCAB_FILES_NAMES
lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase : List[Any] =DPRQuestionEncoderTokenizer
a_ : Union[str, Any] = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
a_ : Dict = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
a_ : Dict = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) into sequences of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output a batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(lowerCamelCase__ )
class __UpperCamelCase :
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
elif titles is None or texts is None:
lowerCamelCase_ =titles if texts is None else texts
return super().__call__(
lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
lowerCamelCase_ =titles if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [titles]
lowerCamelCase_ =texts if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [texts]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =questions if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [questions] * n_passages
assert len(lowerCAmelCase ) == len(
lowerCAmelCase ), f'''There should be as many titles as texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.'''
lowerCamelCase_ =super().__call__(lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ =super().__call__(lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ ={
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase, lowerCAmelCase )
]
}
if return_attention_mask is not False:
lowerCamelCase_ =[]
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCamelCase_ =attention_mask
return self.pad(lowerCAmelCase, padding=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 16, lowerCAmelCase = 64, lowerCAmelCase = 4, ):
"""simple docstring"""
lowerCamelCase_ =reader_input['''input_ids''']
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =reader_output[:3]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =sorted(range(lowerCAmelCase ), reverse=lowerCAmelCase, key=relevance_logits.__getitem__ )
lowerCamelCase_ =[]
for doc_id in sorted_docs:
lowerCamelCase_ =list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCamelCase_ =sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCamelCase_ =sequence_ids.index(self.pad_token_id )
else:
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCAmelCase, top_spans=lowerCAmelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCAmelCase, start_index=lowerCAmelCase, end_index=lowerCAmelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
if len(lowerCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =[]
for start_index, start_score in enumerate(lowerCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCamelCase_ =sorted(lowerCAmelCase, key=lambda lowerCAmelCase : x[1], reverse=lowerCAmelCase )
lowerCamelCase_ =[]
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
lowerCamelCase_ =end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
lowercase : int =VOCAB_FILES_NAMES
lowercase : Tuple =READER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =READER_PRETRAINED_INIT_CONFIGURATION
lowercase : int =['input_ids', 'attention_mask']
lowercase : Dict =DPRReaderTokenizer
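# A minimal, self-contained sketch of the span-selection idea implemented by
# `_get_best_spans` above: score every (start, end) pair within a maximum
# answer length as start_logit + end_logit, sort by score, then greedily keep
# the highest-scoring spans that neither contain nor fall inside an already
# chosen span. The toy logits below are made up for illustration; this is a
# sketch of the algorithm, not the transformers implementation itself.
def toy_best_spans(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scored = []
    for start, s_score in enumerate(start_logits):
        for offset, e_score in enumerate(end_logits[start : start + max_answer_length]):
            scored.append(((start, start + offset), s_score + e_score))
    scored.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _score in scored:
        # Skip spans nested inside (or containing) a higher-scoring chosen span.
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen
print(toy_best_spans([0.1, 2.0, 0.3, 0.0], [0.0, 0.5, 1.5, 0.2]))  # [(1, 2), (3, 3)]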
| 6 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Union[str, Any] =['image_processor', 'tokenizer']
lowercase : Any ='BlipImageProcessor'
lowercase : List[Any] =('BertTokenizer', 'BertTokenizerFast')
def __init__( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =False
super().__init__(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =self.image_processor
def __call__( self, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = True, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
lowerCamelCase_ =self.tokenizer
lowerCamelCase_ =self.tokenizer(
text=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, )
return text_encoding
# add pixel_values
lowerCamelCase_ =self.image_processor(lowerCAmelCase, return_tensors=lowerCAmelCase )
if text is not None:
lowerCamelCase_ =self.tokenizer(
text=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, )
else:
lowerCamelCase_ =None
if text_encoding is not None:
encoding_image_processor.update(lowerCAmelCase )
return encoding_image_processor
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.tokenizer.model_input_names
lowerCamelCase_ =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
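# A hedged usage sketch for the processor defined above. The checkpoint name is
# an assumption (any BLIP checkpoint with a matching processor config would do),
# and running it requires network access, so it is left commented out:
#
#   from PIL import Image
#   from transformers import BlipProcessor
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")
#
# `inputs` combines the image processor's `pixel_values` with the tokenizer's
# `input_ids`/`attention_mask`, as assembled in `__call__` above.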
| 352 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def a_ ( ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ ={
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
lowerCamelCase_ =Dataset.from_dict(__snake_case )
return dataset
class __UpperCamelCase ( lowerCamelCase__ ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =get_dataset()
lowerCamelCase_ =make_duplicate_clusters(lowerCAmelCase, 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ), 2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =get_dataset()
lowerCamelCase_, lowerCamelCase_ =deduplicate_dataset(lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ), 2 )
print(lowerCAmelCase )
self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], lowerCAmelCase )
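# Why the first two fixture rows are clustered: MinHash approximates the
# Jaccard similarity of each file's token-shingle sets, and "a " * 20 versus
# "a " * 30 yield identical shingle sets while "b " * 7 shares none. A minimal
# sketch of that exact similarity (plain sets, no hashing, made-up shingle
# width of 3 tokens; not the minhash_deduplication pipeline itself):
def jaccard(text_a, text_b, width=3):
    def shingles(text):
        tokens = text.split()
        return {tuple(tokens[i : i + width]) for i in range(len(tokens) - width + 1)}
    first, second = shingles(text_a), shingles(text_b)
    return len(first & second) / len(first | second)
print(jaccard("a " * 20, "a " * 30))  # 1.0 -> flagged as duplicates
print(jaccard("a " * 20, "b " * 7))   # 0.0 -> kept separate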
| 6 | 0 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
a_ : List[Any] = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_28,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class __UpperCamelCase ( unittest.TestCase ):
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
lowerCamelCase_ =TOKEN
HfFolder.save_token(lowerCAmelCase )
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token, repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
config.push_to_hub('''test-config''', use_auth_token=self._token )
lowerCamelCase_ =BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase, getattr(lowerCAmelCase, lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token, repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase, repo_id='''test-config''', push_to_hub=lowerCAmelCase, use_auth_token=self._token )
lowerCamelCase_ =BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase, getattr(lowerCAmelCase, lowerCAmelCase ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''', use_auth_token=self._token )
lowerCamelCase_ =BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase, getattr(lowerCAmelCase, lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token, repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase, repo_id='''valid_org/test-config-org''', push_to_hub=lowerCAmelCase, use_auth_token=self._token )
lowerCamelCase_ =BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase, getattr(lowerCAmelCase, lowerCAmelCase ) )
def lowercase__ ( self ):
"""simple docstring"""
CustomConfig.register_for_auto_class()
lowerCamelCase_ =CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
lowerCamelCase_ =AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCAmelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, '''CustomConfig''' )
self.assertEqual(new_config.attribute, 42 )
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase_ =c.n_embd + 1 # int
lowerCamelCase_ =c.resid_pdrop + 1.0 # float
lowerCamelCase_ =not c.scale_attn_weights # bool
lowerCamelCase_ =c.summary_type + '''foo''' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCAmelCase, c.n_embd, '''mismatch for key: n_embd''' )
self.assertEqual(lowerCAmelCase, c.resid_pdrop, '''mismatch for key: resid_pdrop''' )
self.assertEqual(lowerCAmelCase, c.scale_attn_weights, '''mismatch for key: scale_attn_weights''' )
self.assertEqual(lowerCAmelCase, c.summary_type, '''mismatch for key: summary_type''' )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =PretrainedConfig()
lowerCamelCase_ =[key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCAmelCase, ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
lowerCamelCase_ =[key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase, lowerCAmelCase )]
if len(lowerCAmelCase ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
f''' {', '.join(lowerCAmelCase )}.''' )
def lowercase__ ( self ):
"""simple docstring"""
with self.assertRaises(lowerCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''', subfolder='''bert''' )
self.assertIsNotNone(lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =mock.Mock()
lowerCamelCase_ =500
lowerCamelCase_ ={}
lowerCamelCase_ =HTTPError
lowerCamelCase_ ={}
# Download this model to make sure it's in the cache.
lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''', return_value=lowerCAmelCase ) as mock_head:
lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This checks that we did call the fake head request
mock_head.assert_called()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =AutoConfig.from_pretrained('''bert-base-cased''' )
lowerCamelCase_ =['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCAmelCase )
lowerCamelCase_ =2
json.dump(configuration.to_dict(), open(os.path.join(lowerCAmelCase, '''config.4.0.0.json''' ), '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase_ =['''config.42.0.0.json''']
lowerCamelCase_ =768
configuration.save_pretrained(lowerCAmelCase )
shutil.move(os.path.join(lowerCAmelCase, '''config.4.0.0.json''' ), os.path.join(lowerCAmelCase, '''config.42.0.0.json''' ) )
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase )
self.assertEqual(new_configuration.hidden_size, 768 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
lowerCamelCase_ ='''v4.0.0'''
lowerCamelCase_, lowerCamelCase_ =new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCAmelCase, return_unused_kwargs=lowerCAmelCase )
self.assertEqual(new_configuration.hidden_size, 2 )
# This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCAmelCase, {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase_ ='''v3.0.0'''
lowerCamelCase_ =old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase )
self.assertEqual(old_configuration.hidden_size, 768 )
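# A minimal sketch of the "k1=v1,k2=v2" parsing that the update_from_string
# test above exercises: values arrive as strings and must be coerced back to
# the type of the existing attribute. This illustrates the idea only; it is
# not the transformers implementation, and `_Cfg` is a made-up stand-in.
def apply_update_string(obj, update):
    for pair in update.split(","):
        key, value = pair.split("=")
        current = getattr(obj, key)
        if isinstance(current, bool):  # check bool before int: bool subclasses int
            setattr(obj, key, value == "True")
        elif isinstance(current, int):
            setattr(obj, key, int(value))
        elif isinstance(current, float):
            setattr(obj, key, float(value))
        else:
            setattr(obj, key, value)
class _Cfg:
    n_embd, resid_pdrop, scale_attn_weights = 768, 0.1, True
cfg = _Cfg()
apply_update_string(cfg, "n_embd=1024,resid_pdrop=0.2,scale_attn_weights=False")
print(cfg.n_embd, cfg.resid_pdrop, cfg.scale_attn_weights)  # 1024 0.2 False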
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a_ : Any = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
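# The structure above defers the heavy torch import until an attribute is first
# accessed. A minimal, self-contained sketch of the same lazy-import idea (not
# the transformers _LazyModule implementation; the math example is illustrative):
import importlib
class TinyLazyModule:
    def __init__(self, name_to_module):
        self._map = name_to_module  # attribute name -> module path
    def __getattr__(self, name):
        # The import only happens here, on first attribute access.
        module = importlib.import_module(self._map[name])
        return getattr(module, name)
lazy = TinyLazyModule({"sqrt": "math"})
print(lazy.sqrt(9.0))  # 3.0 -- "math" was imported on demand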
| 6 | 0 |
'''simple docstring'''
import os
import sys
import transformers
a_ : str = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 354 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def a_ ( __snake_case : int = 150_0000 ) -> int:
"""simple docstring"""
lowerCamelCase_ =defaultdict(__snake_case )
lowerCamelCase_ =2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __snake_case , 2 ):
if gcd(__snake_case , __snake_case ) > 1:
continue
lowerCamelCase_ =2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__snake_case , limit + 1 , __snake_case ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"""{solution() = }""")
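# A quick, self-contained check of the Euclid parametrisation the solver above
# iterates over: for coprime m > n of opposite parity, (m^2 - n^2, 2mn, m^2 + n^2)
# is a primitive Pythagorean triple whose perimeter is 2m(m + n).
def euclid_triple(m, n):
    return (m * m - n * n, 2 * m * n, m * m + n * n)
a, b, c = euclid_triple(2, 1)
assert a * a + b * b == c * c and a + b + c == 2 * 2 * (2 + 1)
print((a, b, c))  # (3, 4, 5), perimeter 12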
| 6 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a_ : Union[str, Any] = logging.get_logger(__name__)
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[Any] =['pixel_values']
def __init__( self, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = PILImageResampling.BILINEAR, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = 1 / 255, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
lowerCamelCase_ =size if size is not None else {'''shortest_edge''': 256}
lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase )
lowerCamelCase_ =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowerCamelCase_ =get_size_dict(lowerCAmelCase )
lowerCamelCase_ =do_resize
lowerCamelCase_ =size
lowerCamelCase_ =resample
lowerCamelCase_ =do_center_crop
lowerCamelCase_ =crop_size
lowerCamelCase_ =do_rescale
lowerCamelCase_ =rescale_factor
lowerCamelCase_ =do_normalize
lowerCamelCase_ =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCamelCase_ =image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = PILImageResampling.BICUBIC, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowerCamelCase_ =get_resize_output_image_size(lowerCAmelCase, size=size['''shortest_edge'''], default_to_square=lowerCAmelCase )
return resize(lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =get_size_dict(lowerCAmelCase )
return center_crop(lowerCAmelCase, size=(size['''height'''], size['''width''']), data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase ):
"""simple docstring"""
return rescale(lowerCAmelCase, scale=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
return normalize(lowerCAmelCase, mean=lowerCAmelCase, std=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = ChannelDimension.FIRST, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ =size if size is not None else self.size
lowerCamelCase_ =get_size_dict(lowerCAmelCase, default_to_square=lowerCAmelCase )
lowerCamelCase_ =resample if resample is not None else self.resample
lowerCamelCase_ =do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ =crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ =get_size_dict(lowerCAmelCase )
lowerCamelCase_ =do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ =rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ =do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ =image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ =image_std if image_std is not None else self.image_std
lowerCamelCase_ =make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowerCamelCase_ =[to_numpy_array(lowerCAmelCase ) for image in images]
if do_resize:
lowerCamelCase_ =[self.resize(image=lowerCAmelCase, size=lowerCAmelCase, resample=lowerCAmelCase ) for image in images]
if do_center_crop:
lowerCamelCase_ =[self.center_crop(image=lowerCAmelCase, size=lowerCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase_ =[self.rescale(image=lowerCAmelCase, scale=lowerCAmelCase ) for image in images]
if do_normalize:
lowerCamelCase_ =[self.normalize(image=lowerCAmelCase, mean=lowerCAmelCase, std=lowerCAmelCase ) for image in images]
lowerCamelCase_ =[to_channel_dimension_format(lowerCAmelCase, lowerCAmelCase ) for image in images]
lowerCamelCase_ ={'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase, tensor_type=lowerCAmelCase )
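# A minimal sketch of the "shortest_edge" computation performed by the
# get_resize_output_image_size call above with default_to_square=False: scale
# the image so its shorter side equals `shortest_edge` while preserving the
# aspect ratio. Rounding details in the library may differ slightly.
def shortest_edge_size(height, width, shortest_edge):
    scale = shortest_edge / min(height, width)
    return (round(height * scale), round(width * scale))
print(shortest_edge_size(480, 640, 256))  # (256, 341): the short side becomes 256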
| 355 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ : Tuple = 16
a_ : Optional[int] = 32
def a_ ( __snake_case : Accelerator , __snake_case : int = 16 ) -> str:
"""simple docstring"""
lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowerCamelCase_ =load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__snake_case : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase_ =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase_ =datasets.map(
__snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase_ =tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__snake_case : Any ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase_ =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase_ =16
elif accelerator.mixed_precision != "no":
lowerCamelCase_ =8
else:
lowerCamelCase_ =None
return tokenizer.pad(
__snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowerCamelCase_ =DataLoader(
tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
lowerCamelCase_ =DataLoader(
tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ : Tuple = mocked_dataloaders # noqa: F811
def a_ ( __snake_case : List[str] , __snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __snake_case ) == "1":
lowerCamelCase_ =2
# Initialize accelerator
lowerCamelCase_ =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ =config['''lr''']
lowerCamelCase_ =int(config['''num_epochs'''] )
lowerCamelCase_ =int(config['''seed'''] )
lowerCamelCase_ =int(config['''batch_size'''] )
lowerCamelCase_ =evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__snake_case )
def inner_training_loop(__snake_case : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ =model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ =AdamW(params=model.parameters() , lr=__snake_case )
lowerCamelCase_, lowerCamelCase_ =get_dataloaders(__snake_case , __snake_case )
# Instantiate scheduler
lowerCamelCase_ =get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.loss
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase_ =model(**__snake_case )
lowerCamelCase_ =outputs.logits.argmax(dim=-1 )
lowerCamelCase_, lowerCamelCase_ =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
lowerCamelCase_ =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , __snake_case )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def a_ ( ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowerCamelCase_ =parser.parse_args()
lowerCamelCase_ ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
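# The find_executable_batch_size decorator used above reruns the training
# function with a smaller batch size whenever it hits an out-of-memory error.
# A simplified, framework-free sketch of that retry loop (the real decorator
# also frees accelerator memory and recognises CUDA-specific OOM errors;
# `fake_train` is a made-up stand-in):
def retry_with_smaller_batches(fn, starting_batch_size):
    batch_size = starting_batch_size
    while batch_size > 0:
        try:
            return fn(batch_size)
        except MemoryError:
            batch_size //= 2  # halve and try again, as the decorator does
    raise RuntimeError("No executable batch size found.")
def fake_train(batch_size):
    if batch_size > 4:  # pretend anything above 4 does not fit in memory
        raise MemoryError
    return f"trained with batch_size={batch_size}"
print(retry_with_smaller_batches(fake_train, 16))  # trained with batch_size=4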
| 6 | 0 |
'''simple docstring'''
def a_ ( __snake_case : str , __snake_case : str ) -> float:
"""simple docstring"""
def get_matched_characters(__snake_case : str , __snake_case : str ) -> str:
lowerCamelCase_ =[]
lowerCamelCase_ =min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
lowerCamelCase_ =int(max(0 , i - limit ) )
lowerCamelCase_ =int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(__snake_case )
lowerCamelCase_ =F'''{_stra[0:_stra.index(__snake_case )]} {_stra[_stra.index(__snake_case ) + 1:]}'''
return "".join(__snake_case )
# matching characters
lowerCamelCase_ =get_matched_characters(__snake_case , __snake_case )
lowerCamelCase_ =get_matched_characters(__snake_case , __snake_case )
lowerCamelCase_ =len(__snake_case )
# transposition
lowerCamelCase_ =(
len([(ca, ca) for ca, ca in zip(__snake_case , __snake_case ) if ca != ca] ) // 2
)
if not match_count:
lowerCamelCase_ =0.0
else:
lowerCamelCase_ =(
1
/ 3
* (
match_count / len(__snake_case )
+ match_count / len(__snake_case )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
lowerCamelCase_ =0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
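# Worked example with the classic textbook pair "martha" / "marhta": all six
# characters match with one transposition, so
#   jaro = (6/6 + 6/6 + (6 - 1)/6) / 3 ~= 0.9444
# and with a common prefix of length 3 the Winkler adjustment gives
#   jaro_winkler = 0.9444 + 3 * 0.1 * (1 - 0.9444) ~= 0.9611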
| 356 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
a_ : List[str] = """src/diffusers"""
# Matches is_xxx_available()
a_ : int = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
a_ : List[str] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
a_ : Optional[Any] = """
{0} = None
"""
a_ : List[Any] = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
a_ : Optional[Any] = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def a_ ( __snake_case : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =_re_backend.findall(__snake_case )
if len(__snake_case ) == 0:
return None
return "_and_".join(__snake_case )
def a_ ( ) -> Optional[int]:
"""simple docstring"""
with open(os.path.join(__snake_case , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.readlines()
# Get to the point where we do the actual imports for type checking
lowerCamelCase_ =0
lowerCamelCase_ ={}
# Go through to the end of the file
while line_index < len(__snake_case ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowerCamelCase_ =find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('''else:''' ):
line_index += 1
line_index += 1
lowerCamelCase_ =[]
# Until we unindent, add backend objects to the list
while line_index < len(__snake_case ) and len(lines[line_index] ) > 1:
lowerCamelCase_ =lines[line_index]
lowerCamelCase_ =_re_single_line_import.search(__snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__snake_case ) > 0:
lowerCamelCase_ =objects
else:
line_index += 1
return backend_specific_objects
def a_ ( __snake_case : Dict , __snake_case : int ) -> Union[str, Any]:
"""simple docstring"""
if name.isupper():
return DUMMY_CONSTANT.format(__snake_case )
elif name.islower():
return DUMMY_FUNCTION.format(__snake_case , __snake_case )
else:
return DUMMY_CLASS.format(__snake_case , __snake_case )
def a_ ( __snake_case : Tuple=None ) -> List[str]:
"""simple docstring"""
if backend_specific_objects is None:
lowerCamelCase_ =read_init()
# Special correspondence from backend to module name, as used in the function requires_modulename
lowerCamelCase_ ={}
for backend, objects in backend_specific_objects.items():
lowerCamelCase_ ='''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''' ) ) + ''']'''
lowerCamelCase_ ='''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__snake_case , __snake_case ) for o in objects] )
lowerCamelCase_ =dummy_file
return dummy_files
def a_ ( __snake_case : Dict=False ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =create_dummy_files()
# Special correspondence from backend to shortcut, as used in utils/dummy_xxx_objects.py
lowerCamelCase_ ={'''torch''': '''pt'''}
# Locate actual dummy modules and read their content.
lowerCamelCase_ =os.path.join(__snake_case , '''utils''' )
lowerCamelCase_ ={
backend: os.path.join(__snake_case , F'''dummy_{short_names.get(__snake_case , __snake_case )}_objects.py''' )
for backend in dummy_files.keys()
}
lowerCamelCase_ ={}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__snake_case ):
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ =f.read()
else:
lowerCamelCase_ =''''''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py as the main '''
'''__init__ has new objects.''' )
with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'''The main __init__ has objects that are not present in '''
F'''diffusers.utils.dummy_{short_names.get(__snake_case , __snake_case )}_objects.py. Run `make fix-copies` '''
'''to fix this.''' )
if __name__ == "__main__":
a_ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
a_ : Tuple = parser.parse_args()
check_dummies(args.fix_and_overwrite)
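# A quick illustration of the dummy-object pattern the templates above generate:
# every entry point of the placeholder raises a clear "requires backend" error
# instead of failing at import time. The class and backend names below are
# illustrative, and the template is restated inline so the snippet is self-contained.
template = (
    "class {0}(metaclass=DummyObject):\n"
    "    _backends = {1}\n"
    "    def __init__(self, *args, **kwargs):\n"
    "        requires_backends(self, {1})\n"
)
print(template.format("UNet2DModel", '["torch"]'))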
| 6 | 0 |
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
a_ : List[Any] = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
a_ : Any = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
a_ : Optional[Any] = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def lowercase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ), reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''], )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=1, lowerCAmelCase="binary", lowerCAmelCase=None, lowerCAmelCase="warn", ):
"""simple docstring"""
lowerCamelCase_ =recall_score(
lowerCAmelCase, lowerCAmelCase, labels=lowerCAmelCase, pos_label=lowerCAmelCase, average=lowerCAmelCase, sample_weight=lowerCAmelCase, zero_division=lowerCAmelCase, )
return {"recall": float(lowerCAmelCase ) if score.size == 1 else score}
| 357 |
'''simple docstring'''
a_ : List[Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def a_ ( __snake_case : int ) -> int:
"""simple docstring"""
lowerCamelCase_ =0
while number:
# Speed is increased slightly by checking five digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# Two chains are formed:
# one ends with 89, and declaring the chain member 58 first requires
# the fewest iterations for all the members to be checked;
# the other ends with 1 and contains only the single element 1.
# So 58 and 1 are chosen to be declared at the start.
# A dictionary was changed to an array to speed up the solution.
a_ : list[bool | None] = [None] * 10_00_00_00
a_ : List[Any] = True
a_ : Optional[Any] = False
def a_ ( __snake_case : int ) -> bool:
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
lowerCamelCase_ =chain(next_number(__snake_case ) )
lowerCamelCase_ =number_chain
while number < 1000_0000:
lowerCamelCase_ =number_chain
number *= 10
return number_chain
def a_ ( __snake_case : int = 1000_0000 ) -> int:
"""simple docstring"""
for i in range(1 , __snake_case ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
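# The two chains from the Project Euler 92 statement, reproduced with a
# self-contained digit-squares step (independent of the lookup table above):
def step(n):
    return sum(int(d) ** 2 for d in str(n))
chain = [44]
while chain[-1] not in (1, 89):
    chain.append(step(chain[-1]))
print(chain)  # [44, 32, 13, 10, 1] -> this chain arrives at 1
chain = [85]
while chain[-1] not in (1, 89):
    chain.append(step(chain[-1]))
print(chain)  # [85, 89] -> this chain arrives at 89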
| 6 | 0 |
'''simple docstring'''
def a_ ( __snake_case : int , __snake_case : list[int] , __snake_case : int ) -> int:
"""simple docstring"""
def count_of_possible_combinations(__snake_case : int ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(__snake_case )
def a_ ( __snake_case : int , __snake_case : list[int] , __snake_case : int ) -> int:
"""simple docstring"""
def count_of_possible_combinations_with_dp_array(
__snake_case : int , __snake_case : list[int] ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowerCamelCase_ =sum(
count_of_possible_combinations_with_dp_array(target - item , __snake_case )
for item in array )
lowerCamelCase_ =answer
return answer
lowerCamelCase_ =[-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(__snake_case , __snake_case )
def a_ ( __snake_case : int , __snake_case : list[int] , __snake_case : int ) -> int:
"""simple docstring"""
lowerCamelCase_ =[0] * (target + 1)
lowerCamelCase_ =1
for i in range(1 , target + 1 ):
for j in range(__snake_case ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : List[str] = 3
a_ : str = 5
a_ : Union[str, Any] = [1, 2, 5]
print(combination_sum_iv(n, array, target))
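# A hand-checkable trace of the bottom-up recurrence used above for target 5
# and items [1, 2, 5]: dp[0] = 1 (the empty combination) and
# dp[i] = sum(dp[i - item]) over the items that fit. Order matters, so 1+2+2
# and 2+1+2 count separately.
dp = [1, 0, 0, 0, 0, 0]
for i in range(1, 6):
    dp[i] = sum(dp[i - item] for item in [1, 2, 5] if item <= i)
print(dp)  # [1, 1, 2, 3, 5, 9] -> 9 ordered combinations sum to 5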
| 358 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def a_ ( __snake_case : Tuple ) -> str:
"""simple docstring"""
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class __UpperCamelCase ( lowerCamelCase__ ):
@staticmethod
def lowercase__ ( lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''', type=lowerCAmelCase, default=lowerCAmelCase, help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''', action='''store_true''', help='''Force the model to be downloaded even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''', action='''store_true''', help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''', )
download_parser.add_argument('''model''', type=lowerCAmelCase, help='''Name of the model to download''' )
download_parser.set_defaults(func=lowerCAmelCase )
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =model
lowerCamelCase_ =cache
lowerCamelCase_ =force
lowerCamelCase_ =trust_remote_code
def lowercase__ ( self ):
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code )
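# Usage sketch (comments only; in this dump both the registration and dispatch
# methods are renamed `lowercase__`, and `run` is assumed as the real dispatch name):
# cmd = DownloadCommand('bert-base-uncased', None, False, False)
# cmd.run()  # downloads both the AutoModel weights and the AutoTokenizer files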
| 6 | 0 |
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class __UpperCamelCase :
def __init__( self ):
"""simple docstring"""
lowerCamelCase_ ={}
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=1 ):
"""simple docstring"""
if self.graph.get(lowerCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowerCamelCase_ =[[w, v]]
if not self.graph.get(lowerCAmelCase ):
lowerCamelCase_ =[]
def lowercase__ ( self ):
"""simple docstring"""
return list(self.graph )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
if self.graph.get(lowerCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase=-2, lowerCAmelCase=-1 ):
"""simple docstring"""
if s == d:
return []
lowerCamelCase_ =[]
lowerCamelCase_ =[]
if s == -2:
lowerCamelCase_ =list(self.graph )[0]
stack.append(lowerCAmelCase )
visited.append(lowerCAmelCase )
lowerCamelCase_ =s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase ) != 0:
lowerCamelCase_ =stack[len(lowerCAmelCase ) - 1]
else:
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(lowerCAmelCase ) == 0:
return visited
def lowercase__ ( self, lowerCAmelCase=-1 ):
"""simple docstring"""
if c == -1:
lowerCamelCase_ =floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase ):
            # every vertex gets between 1 and 102 outgoing edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCamelCase_ =floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase, lowerCAmelCase, 1 )
def lowercase__ ( self, lowerCAmelCase=-2 ):
"""simple docstring"""
lowerCamelCase_ =deque()
lowerCamelCase_ =[]
if s == -2:
lowerCamelCase_ =list(self.graph )[0]
d.append(lowerCAmelCase )
visited.append(lowerCAmelCase )
while d:
lowerCamelCase_ =d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return len(self.graph[u] )
def lowercase__ ( self, lowerCAmelCase=-2 ):
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =[]
if s == -2:
lowerCamelCase_ =list(self.graph )[0]
stack.append(lowerCAmelCase )
visited.append(lowerCAmelCase )
lowerCamelCase_ =s
lowerCamelCase_ =[]
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase ) != 0:
lowerCamelCase_ =stack[len(lowerCAmelCase ) - 1]
else:
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(lowerCAmelCase ) == 0:
return sorted_nodes
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(lowerCAmelCase )
visited.append(lowerCAmelCase )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(lowerCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(lowerCAmelCase ) != 0:
lowerCamelCase_ =stack[len(lowerCAmelCase ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(lowerCAmelCase )
lowerCamelCase_ =s
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(lowerCAmelCase ) == 0:
return list(lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(lowerCAmelCase )
visited.append(lowerCAmelCase )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(lowerCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(lowerCAmelCase ) != 0:
lowerCamelCase_ =stack[len(lowerCAmelCase ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(lowerCAmelCase )
lowerCamelCase_ =s
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(lowerCAmelCase ) == 0:
return False
def lowercase__ ( self, lowerCAmelCase=-2, lowerCAmelCase=-1 ):
"""simple docstring"""
lowerCamelCase_ =time()
self.dfs(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =time()
return end - begin
def lowercase__ ( self, lowerCAmelCase=-2 ):
"""simple docstring"""
lowerCamelCase_ =time()
self.bfs(lowerCAmelCase )
lowerCamelCase_ =time()
return end - begin
class __UpperCamelCase :
def __init__( self ):
"""simple docstring"""
lowerCamelCase_ ={}
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=1 ):
"""simple docstring"""
if self.graph.get(lowerCAmelCase ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowerCamelCase_ =[[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
lowerCamelCase_ =[[w, u]]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
if self.graph.get(lowerCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase )
# the other way round
if self.graph.get(lowerCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase=-2, lowerCAmelCase=-1 ):
"""simple docstring"""
if s == d:
return []
lowerCamelCase_ =[]
lowerCamelCase_ =[]
if s == -2:
lowerCamelCase_ =list(self.graph )[0]
stack.append(lowerCAmelCase )
visited.append(lowerCAmelCase )
lowerCamelCase_ =s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase ) != 0:
lowerCamelCase_ =stack[len(lowerCAmelCase ) - 1]
else:
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(lowerCAmelCase ) == 0:
return visited
def lowercase__ ( self, lowerCAmelCase=-1 ):
"""simple docstring"""
if c == -1:
lowerCamelCase_ =floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase ):
            # every vertex gets between 1 and 102 outgoing edges
for _ in range(floor(random() * 102 ) + 1 ):
lowerCamelCase_ =floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase, lowerCAmelCase, 1 )
def lowercase__ ( self, lowerCAmelCase=-2 ):
"""simple docstring"""
lowerCamelCase_ =deque()
lowerCamelCase_ =[]
if s == -2:
lowerCamelCase_ =list(self.graph )[0]
d.append(lowerCAmelCase )
visited.append(lowerCAmelCase )
while d:
lowerCamelCase_ =d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
return len(self.graph[u] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(lowerCAmelCase )
visited.append(lowerCAmelCase )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(lowerCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(lowerCAmelCase ) != 0:
lowerCamelCase_ =stack[len(lowerCAmelCase ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(lowerCAmelCase )
lowerCamelCase_ =s
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(lowerCAmelCase ) == 0:
return list(lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =[]
lowerCamelCase_ =list(self.graph )[0]
stack.append(lowerCAmelCase )
visited.append(lowerCAmelCase )
lowerCamelCase_ =-2
lowerCamelCase_ =[]
lowerCamelCase_ =s
lowerCamelCase_ =False
lowerCamelCase_ =set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowerCamelCase_ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowerCamelCase_ =len(lowerCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowerCamelCase_ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowerCamelCase_ =True
if len(lowerCAmelCase ) != 0:
lowerCamelCase_ =stack[len(lowerCAmelCase ) - 1]
else:
lowerCamelCase_ =False
indirect_parents.append(lowerCAmelCase )
lowerCamelCase_ =s
lowerCamelCase_ =ss
            # check if we have reached the starting point
if len(lowerCAmelCase ) == 0:
return False
def lowercase__ ( self ):
"""simple docstring"""
return list(self.graph )
def lowercase__ ( self, lowerCAmelCase=-2, lowerCAmelCase=-1 ):
"""simple docstring"""
lowerCamelCase_ =time()
self.dfs(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =time()
return end - begin
def lowercase__ ( self, lowerCAmelCase=-2 ):
"""simple docstring"""
lowerCamelCase_ =time()
self.bfs(lowerCAmelCase )
lowerCamelCase_ =time()
return end - begin
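# Usage sketch for the graph classes above (comments only). The method defs are
# renamed `lowercase__` in this dump, but internal calls reveal the real names
# (`add_pair`, `dfs`, `bfs`); the class names themselves are obfuscated to
# `__UpperCamelCase`. Intended flow:
# g = Graph()          # second class above: the undirected variant
# g.add_pair(1, 2)
# g.add_pair(2, 3)
# g.dfs(s=1, d=3)      # -> [1, 2, 3]
# g.bfs(s=1)           # -> [1, 2, 3]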
| 359 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
a_ : List[str] = logging.get_logger(__name__)
a_ : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
a_ : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowerCamelCase__ )} )
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowercase : int =field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase : int =field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowercase : int =field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowercase : int =field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowercase : float =field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowercase : int =field(
default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
lowercase : int =field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowercase : int =field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[Any] ='train'
lowercase : Any ='dev'
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : SquadDataTrainingArguments
lowercase : List[SquadFeatures]
lowercase : Split
lowercase : bool
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = Split.train, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = "pt", ):
"""simple docstring"""
lowerCamelCase_ =args
lowerCamelCase_ =is_language_sensitive
lowerCamelCase_ =SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(lowerCAmelCase, lowerCAmelCase ):
try:
lowerCamelCase_ =Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
lowerCamelCase_ =mode
# Load data features from cache or dataset file
lowerCamelCase_ ='''v2''' if args.version_2_with_negative else '''v1'''
lowerCamelCase_ =os.path.join(
cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ =cached_features_file + '''.lock'''
with FileLock(lowerCAmelCase ):
if os.path.exists(lowerCAmelCase ) and not args.overwrite_cache:
lowerCamelCase_ =time.time()
lowerCamelCase_ =torch.load(lowerCAmelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ =self.old_features['''features''']
lowerCamelCase_ =self.old_features.get('''dataset''', lowerCAmelCase )
lowerCamelCase_ =self.old_features.get('''examples''', lowerCAmelCase )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
''' future run''' )
else:
if mode == Split.dev:
lowerCamelCase_ =self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ =self.processor.get_train_examples(args.data_dir )
lowerCamelCase_, lowerCamelCase_ =squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowerCAmelCase, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowerCAmelCase, )
lowerCamelCase_ =time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples}, lowerCAmelCase, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.features[i]
lowerCamelCase_ =torch.tensor(feature.input_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.attention_mask, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.token_type_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.cls_index, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.p_mask, dtype=torch.float )
lowerCamelCase_ =torch.tensor(feature.is_impossible, dtype=torch.float )
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ =torch.tensor(feature.start_position, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
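# Consumption sketch (assumed setup; identifiers illustrative). The class above,
# `SquadDataset` in the un-obfuscated source, plugs straight into a DataLoader:
# from torch.utils.data import DataLoader
# dataset = SquadDataset(args, tokenizer, mode=Split.train)
# batch = next(iter(DataLoader(dataset, batch_size=8)))
# # keys: input_ids, attention_mask, start_positions, end_positions and, for
# # model types that use them, token_type_ids / cls_index / p_mask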
| 6 | 0 |
'''simple docstring'''
a_ : int = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 360 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Any = logging.get_logger(__name__)
a_ : Optional[int] = """https://openaipublic.azureedge.net/jukebox/models/"""
a_ : Any = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def a_ ( __snake_case : int ) -> Any:
"""simple docstring"""
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
lowerCamelCase_ =key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
lowerCamelCase_ =key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
lowerCamelCase_ =key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowerCamelCase_ =key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
lowerCamelCase_ =key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def a_ ( __snake_case : Dict , __snake_case : int , __snake_case : Dict , __snake_case : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ ={}
import re
lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
lowerCamelCase_ =re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
lowerCamelCase_ =re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_conv_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_encoder_block_conv_in.sub(__snake_case , __snake_case )
elif re_encoder_block_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] )
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_encoder_block_resnet.sub(__snake_case , __snake_case )
elif re_encoder_block_proj_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_encoder_block_proj_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
lowerCamelCase_ =re_encoder_block_proj_out.sub(__snake_case , __snake_case )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_conv_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_decoder_block_conv_out.sub(__snake_case , __snake_case )
elif re_decoder_block_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_decoder_block_resnet.sub(__snake_case , __snake_case )
elif re_decoder_block_proj_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_decoder_block_proj_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
lowerCamelCase_ =re_decoder_block_proj_in.sub(__snake_case , __snake_case )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_conv_out.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
lowerCamelCase_ =re_prior_cond_conv_out.sub(__snake_case , __snake_case )
elif re_prior_cond_resnet.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_resnet.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCamelCase_ ={'''1''': 1, '''3''': 2}[groups[-2]]
lowerCamelCase_ =F'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
lowerCamelCase_ =F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
lowerCamelCase_ =prefix + resnet_block
lowerCamelCase_ =re_prior_cond_resnet.sub(__snake_case , __snake_case )
elif re_prior_cond_proj_in.fullmatch(__snake_case ):
lowerCamelCase_ =re_prior_cond_proj_in.match(__snake_case )
lowerCamelCase_ =regex_match.groups()
lowerCamelCase_ =F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
lowerCamelCase_ =re_prior_cond_proj_in.sub(__snake_case , __snake_case )
# keep original key
else:
lowerCamelCase_ =original_key
lowerCamelCase_ =replace_key(__snake_case )
if F'''{key_prefix}.{key}''' not in model_state_dict or key is None:
print(F'''failed converting {original_key} to {key}, does not match''' )
        # handle mismatched shape
elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape:
lowerCamelCase_ =model_state_dict[F'''{key_prefix}.{key}''']
            print(F'''{original_key} -> {key} :\nshape {val.shape} and {value.shape} do not match''' )
lowerCamelCase_ =original_key
lowerCamelCase_ =original_key
lowerCamelCase_ =value
return new_dict
@torch.no_grad()
def a_ ( __snake_case : List[str]=None , __snake_case : Tuple=None ) -> Union[str, Any]:
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ):
lowerCamelCase_ =requests.get(F'''{PREFIX}{file}''' , allow_redirects=__snake_case )
os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=__snake_case )
open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , '''wb''' ).write(r.content )
lowerCamelCase_ =MODEL_MAPPING[model_name.split('''/''' )[-1]]
lowerCamelCase_ =JukeboxConfig.from_pretrained(__snake_case )
lowerCamelCase_ =JukeboxModel(__snake_case )
lowerCamelCase_ =[]
lowerCamelCase_ ={}
for i, dict_name in enumerate(__snake_case ):
lowerCamelCase_ =torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )['''model''']
lowerCamelCase_ ={}
for k in old_dic.keys():
if k.endswith('''.b''' ):
lowerCamelCase_ =old_dic[k]
elif k.endswith('''.w''' ):
lowerCamelCase_ =old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowerCamelCase_ =old_dic[k]
else:
lowerCamelCase_ =old_dic[k]
lowerCamelCase_ ='''vqvae''' if i == 0 else F'''priors.{3 - i}'''
lowerCamelCase_ =fix_jukebox_keys(__snake_case , model.state_dict() , __snake_case , __snake_case )
weight_dict.append(__snake_case )
lowerCamelCase_ =weight_dict.pop(0 )
model.vqvae.load_state_dict(__snake_case )
for i in range(len(__snake_case ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile:
json.dump(__snake_case , __snake_case )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__snake_case )
return weight_dict
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
a_ : Optional[int] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
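# Worked example of the encoder renaming performed above (traced by hand):
# 'encoders.0.level_blocks.0.model.1.0.weight'
#   matches re_encoder_block_conv_in with groups ('0', '0', '1', '0', 'weight'),
#   so block_index = 1 * 2 + 0 = 2 and the key becomes
#   'encoders.0.level_blocks.0.downsample_block.2.weight'.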
| 6 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
a_ : Optional[Any] = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self, lowerCAmelCase, lowerCAmelCase=7, lowerCAmelCase=3, lowerCAmelCase=18, lowerCAmelCase=30, lowerCAmelCase=400, lowerCAmelCase=None, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=None, ):
"""simple docstring"""
lowerCamelCase_ =size if size is not None else {'''height''': 20, '''width''': 20}
lowerCamelCase_ =parent
lowerCamelCase_ =batch_size
lowerCamelCase_ =num_channels
lowerCamelCase_ =image_size
lowerCamelCase_ =min_resolution
lowerCamelCase_ =max_resolution
lowerCamelCase_ =size
lowerCamelCase_ =do_normalize
lowerCamelCase_ =do_convert_rgb
lowerCamelCase_ =[512, 1_024, 2_048, 4_096]
lowerCamelCase_ =patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def lowercase__ ( self ):
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
lowerCamelCase_ =Image.open(requests.get(lowerCAmelCase, stream=lowerCAmelCase ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
lowercase : List[str] =PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =PixaStructImageProcessingTester(self )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase, '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase, '''do_convert_rgb''' ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processor_tester.prepare_dummy_image()
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
lowerCamelCase_ =2_048
lowerCamelCase_ =image_processor(lowerCAmelCase, return_tensors='''pt''', max_patches=lowerCAmelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0_6_0_6 ), atol=1e-3, rtol=1e-3 ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase, Image.Image )
# Test not batched input
lowerCamelCase_ =(
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
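        # the "+ 2" accounts for the two coordinate features (patch row index
        # and patch column index) that Pix2Struct prepends to each flattened patch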
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCamelCase_ =image_processor(
image_inputs[0], return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
lowerCamelCase_ =image_processor(
lowerCAmelCase, return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase, Image.Image )
# Test not batched input
lowerCamelCase_ =(
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
lowerCamelCase_ =True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(lowerCAmelCase ):
lowerCamelCase_ =image_processor(
image_inputs[0], return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches
lowerCamelCase_ ='''Hello'''
lowerCamelCase_ =image_processor(
image_inputs[0], return_tensors='''pt''', max_patches=lowerCAmelCase, header_text=lowerCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
lowerCamelCase_ =image_processor(
lowerCAmelCase, return_tensors='''pt''', max_patches=lowerCAmelCase, header_text=lowerCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase, numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase, np.ndarray )
lowerCamelCase_ =(
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCamelCase_ =image_processor(
image_inputs[0], return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
lowerCamelCase_ =image_processor(
lowerCAmelCase, return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase, torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase, torch.Tensor )
# Test not batched input
lowerCamelCase_ =(
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCamelCase_ =image_processor(
image_inputs[0], return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
lowerCamelCase_ =image_processor(
lowerCAmelCase, return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
lowercase : int =PixaStructImageProcessor if is_vision_available() else None
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =PixaStructImageProcessingTester(self, num_channels=4 )
lowerCamelCase_ =3
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase, '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase, '''do_convert_rgb''' ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase, Image.Image )
# Test not batched input
lowerCamelCase_ =(
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
lowerCamelCase_ =image_processor(
image_inputs[0], return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (1, max_patch, expected_hidden_dim), )
# Test batched
lowerCamelCase_ =image_processor(
lowerCAmelCase, return_tensors='''pt''', max_patches=lowerCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
| 361 |
'''simple docstring'''
def a_ ( __snake_case : int = 1000 ) -> int:
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =1, 1
lowerCamelCase_ =2
while True:
lowerCamelCase_ =0
lowerCamelCase_ =fa + fa
lowerCamelCase_, lowerCamelCase_ =fa, f
index += 1
for _ in str(__snake_case ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
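# A compact independent sketch of the same computation (added for illustration;
# `_first_index_with_n_digits` is a hypothetical name):
def _first_index_with_n_digits(n: int = 1_000) -> int:
    fa, fb, index = 1, 1, 2
    while len(str(fb)) < n:
        fa, fb = fb, fa + fb
        index += 1
    return index

# For n = 3 this returns 12, since F(12) = 144 is the first 3-digit Fibonacci term.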
| 6 | 0 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a_ : Dict = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
a_ : int = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
a_ : Optional[int] = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def lowercase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''', id='''token''' ), id='''sequence''' ), id='''references''' ),
} ), codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''], reference_urls=[
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
], )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=4, lowerCAmelCase=False ):
"""simple docstring"""
lowerCamelCase_ =compute_bleu(
reference_corpus=lowerCAmelCase, translation_corpus=lowerCAmelCase, max_order=lowerCAmelCase, smooth=lowerCAmelCase )
((lowerCamelCase_), (lowerCamelCase_), (lowerCamelCase_), (lowerCamelCase_), (lowerCamelCase_), (lowerCamelCase_)) =score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
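# For reference, the aggregate score returned above follows the standard
# definition BLEU = BP * exp(sum_n w_n * log p_n), where p_n are the modified
# n-gram precisions for n = 1..max_order with uniform weights w_n = 1/max_order,
# and the brevity penalty is BP = min(1, exp(1 - reference_length / translation_length)).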
| 362 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def a_ ( __snake_case : Any ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '''
F'''{test_file} instead.''' )
lowerCamelCase_ =components[-1]
if not test_fn.endswith('''py''' ):
raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
if not test_fn.startswith('''test_modeling_''' ):
raise ValueError(
F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
lowerCamelCase_ =components[:-1] + [test_fn.replace('''.py''' , '''''' )]
lowerCamelCase_ ='''.'''.join(__snake_case )
return test_module_path
def a_ ( __snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =get_module_path(__snake_case )
lowerCamelCase_ =importlib.import_module(__snake_case )
return test_module
def a_ ( __snake_case : Dict ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =get_test_module(__snake_case )
for attr in dir(__snake_case ):
if attr.endswith('''ModelTester''' ):
tester_classes.append(getattr(__snake_case , __snake_case ) )
# sort with class names
    return sorted(__snake_case , key=lambda x : x.__name__ )
def a_ ( __snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =get_test_module(__snake_case )
for attr in dir(__snake_case ):
lowerCamelCase_ =getattr(__snake_case , __snake_case )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
lowerCamelCase_ =getattr(__snake_case , '''all_model_classes''' , [] )
if len(__snake_case ) > 0:
test_classes.append(__snake_case )
# sort with class names
    return sorted(__snake_case , key=lambda x : x.__name__ )
def a_ ( __snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ =get_test_classes(__snake_case )
lowerCamelCase_ =set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
    return sorted(__snake_case , key=lambda x : x.__name__ )
def a_ ( __snake_case : str ) -> str:
"""simple docstring"""
lowerCamelCase_ =test_class()
if hasattr(__snake_case , '''setUp''' ):
test.setUp()
lowerCamelCase_ =None
if hasattr(__snake_case , '''model_tester''' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
lowerCamelCase_ =test.model_tester.__class__
return model_tester
def a_ ( __snake_case : Dict , __snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =get_test_classes(__snake_case )
lowerCamelCase_ =[]
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(__snake_case )
# sort with class names
    return sorted(__snake_case , key=lambda x : x.__name__ )
def a_ ( __snake_case : Optional[Any] , __snake_case : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ =get_test_classes_for_model(__snake_case , __snake_case )
lowerCamelCase_ =[]
for test_class in test_classes:
lowerCamelCase_ =get_model_tester_from_test_class(__snake_case )
if tester_class is not None:
tester_classes.append(__snake_case )
# sort with class names
    return sorted(__snake_case , key=lambda x : x.__name__ )
def a_ ( __snake_case : Tuple ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ =get_test_classes(__snake_case )
lowerCamelCase_ ={test_class: get_model_tester_from_test_class(__snake_case ) for test_class in test_classes}
return test_tester_mapping
def a_ ( __snake_case : Dict ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =get_model_classes(__snake_case )
lowerCamelCase_ ={
model_class: get_test_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes
}
return model_test_mapping
def a_ ( __snake_case : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =get_model_classes(__snake_case )
lowerCamelCase_ ={
model_class: get_tester_classes_for_model(__snake_case , __snake_case ) for model_class in model_classes
}
return model_to_tester_mapping
def a_ ( __snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
if isinstance(__snake_case , __snake_case ):
return o
elif isinstance(__snake_case , __snake_case ):
return o.__name__
elif isinstance(__snake_case , (list, tuple) ):
return [to_json(__snake_case ) for x in o]
elif isinstance(__snake_case , __snake_case ):
return {to_json(__snake_case ): to_json(__snake_case ) for k, v in o.items()}
else:
return o
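# Usage sketch (comments only): every def above is renamed `a_` in this dump,
# but the internal calls reveal the real names. Intended flow (path illustrative):
# test_classes = get_test_classes('tests/models/bert/test_modeling_bert.py')
# testers = {cls.__name__: get_model_tester_from_test_class(cls) for cls in test_classes}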
| 6 | 0 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def a_ ( __snake_case : Union[str, Any] , __snake_case : Optional[int]=0.9_9_9 , __snake_case : List[str]="cosine" , ) -> int:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__snake_case : Tuple ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__snake_case : Any ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
lowerCamelCase_ =[]
for i in range(__snake_case ):
lowerCamelCase_ =i / num_diffusion_timesteps
lowerCamelCase_ =(i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ) , __snake_case ) )
return torch.tensor(__snake_case , dtype=torch.floataa )
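# Note on the schedule built above: with the cosine transform, each beta is
# beta_i = 1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), where
# alpha_bar(t) = cos^2(((t + 0.008) / 1.008) * pi / 2), clipped at max_beta.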
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
lowercase : List[Any] =[e.name for e in KarrasDiffusionSchedulers]
lowercase : str =2
@register_to_config
def __init__( self, lowerCAmelCase = 1_000, lowerCAmelCase = 0.0_0_0_8_5, lowerCAmelCase = 0.0_1_2, lowerCAmelCase = "linear", lowerCAmelCase = None, lowerCAmelCase = "epsilon", lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = 1.0, lowerCAmelCase = "linspace", lowerCAmelCase = 0, ):
"""simple docstring"""
if trained_betas is not None:
lowerCamelCase_ =torch.tensor(lowerCAmelCase, dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCamelCase_ =torch.linspace(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCamelCase_ =(
torch.linspace(beta_start**0.5, beta_end**0.5, lowerCAmelCase, dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCamelCase_ =betas_for_alpha_bar(lowerCAmelCase, alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
lowerCamelCase_ =betas_for_alpha_bar(lowerCAmelCase, alpha_transform_type='''exp''' )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
lowerCamelCase_ =1.0 - self.betas
lowerCamelCase_ =torch.cumprod(self.alphas, dim=0 )
# set all values
self.set_timesteps(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =use_karras_sigmas
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
"""simple docstring"""
if schedule_timesteps is None:
lowerCamelCase_ =self.timesteps
lowerCamelCase_ =(schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowerCamelCase_ =1 if len(lowerCAmelCase ) > 1 else 0
else:
lowerCamelCase_ =timestep.cpu().item() if torch.is_tensor(lowerCAmelCase ) else timestep
lowerCamelCase_ =self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowercase__ ( self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.index_for_timestep(lowerCAmelCase )
lowerCamelCase_ =self.sigmas[step_index]
lowerCamelCase_ =sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, ):
"""simple docstring"""
lowerCamelCase_ =num_inference_steps
lowerCamelCase_ =num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowerCamelCase_ =np.linspace(0, num_train_timesteps - 1, lowerCAmelCase, dtype=lowerCAmelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowerCamelCase_ =num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCamelCase_ =(np.arange(0, lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(lowerCAmelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowerCamelCase_ =num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowerCamelCase_ =(np.arange(lowerCAmelCase, 0, -step_ratio )).round().copy().astype(lowerCAmelCase )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
lowerCamelCase_ =np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowerCamelCase_ =np.log(lowerCAmelCase )
lowerCamelCase_ =np.interp(lowerCAmelCase, np.arange(0, len(lowerCAmelCase ) ), lowerCAmelCase )
if self.config.use_karras_sigmas:
lowerCamelCase_ =self._convert_to_karras(in_sigmas=lowerCAmelCase, num_inference_steps=self.num_inference_steps )
lowerCamelCase_ =np.array([self._sigma_to_t(lowerCAmelCase, lowerCAmelCase ) for sigma in sigmas] )
lowerCamelCase_ =np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowerCamelCase_ =torch.from_numpy(lowerCAmelCase ).to(device=lowerCAmelCase )
lowerCamelCase_ =torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
lowerCamelCase_ =torch.from_numpy(lowerCAmelCase )
lowerCamelCase_ =torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(lowerCAmelCase ).startswith('''mps''' ):
# mps does not support float64
lowerCamelCase_ =timesteps.to(lowerCAmelCase, dtype=torch.floataa )
else:
lowerCamelCase_ =timesteps.to(device=lowerCAmelCase )
# empty dt and derivative
lowerCamelCase_ =None
lowerCamelCase_ =None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowerCamelCase_ =defaultdict(lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =np.log(lowerCAmelCase )
# get distribution
lowerCamelCase_ =log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
lowerCamelCase_ =np.cumsum((dists >= 0), axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
lowerCamelCase_ =low_idx + 1
lowerCamelCase_ =log_sigmas[low_idx]
lowerCamelCase_ =log_sigmas[high_idx]
# interpolate sigmas
lowerCamelCase_ =(low - log_sigma) / (low - high)
lowerCamelCase_ =np.clip(lowerCAmelCase, 0, 1 )
# transform interpolation to time range
lowerCamelCase_ =(1 - w) * low_idx + w * high_idx
lowerCamelCase_ =t.reshape(sigma.shape )
return t
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =in_sigmas[-1].item()
lowerCamelCase_ =in_sigmas[0].item()
lowerCamelCase_ =7.0 # 7.0 is the value used in the paper
lowerCamelCase_ =np.linspace(0, 1, lowerCAmelCase )
lowerCamelCase_ =sigma_min ** (1 / rho)
lowerCamelCase_ =sigma_max ** (1 / rho)
lowerCamelCase_ =(max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.dt is None
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = True, ):
"""simple docstring"""
lowerCamelCase_ =self.index_for_timestep(lowerCAmelCase )
# advance index counter by 1
lowerCamelCase_ =timestep.cpu().item() if torch.is_tensor(lowerCAmelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowerCamelCase_ =self.sigmas[step_index]
lowerCamelCase_ =self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
lowerCamelCase_ =self.sigmas[step_index - 1]
lowerCamelCase_ =self.sigmas[step_index]
        # currently only gamma=0 is supported; this usually works best anyway.
        # Supporting gamma > 0 would require scaling the timestep before passing
        # it to the model, which would be an API change.
lowerCamelCase_ =0
lowerCamelCase_ =sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowerCamelCase_ =sigma_hat if self.state_in_first_order else sigma_next
lowerCamelCase_ =sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowerCamelCase_ =sigma_hat if self.state_in_first_order else sigma_next
lowerCamelCase_ =model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
lowerCamelCase_ =model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
lowerCamelCase_ =pred_original_sample.clamp(
-self.config.clip_sample_range, self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowerCamelCase_ =(sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowerCamelCase_ =sigma_next - sigma_hat
# store for 2nd order step
lowerCamelCase_ =derivative
lowerCamelCase_ =dt
lowerCamelCase_ =sample
else:
# 2. 2nd order / Heun's method
lowerCamelCase_ =(sample - pred_original_sample) / sigma_next
lowerCamelCase_ =(self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
lowerCamelCase_ =self.dt
lowerCamelCase_ =self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =None
lowerCamelCase_ =sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCAmelCase ):
# mps does not support float64
lowerCamelCase_ =self.timesteps.to(original_samples.device, dtype=torch.floataa )
lowerCamelCase_ =timesteps.to(original_samples.device, dtype=torch.floataa )
else:
lowerCamelCase_ =self.timesteps.to(original_samples.device )
lowerCamelCase_ =timesteps.to(original_samples.device )
lowerCamelCase_ =[self.index_for_timestep(lowerCAmelCase, lowerCAmelCase ) for t in timesteps]
lowerCamelCase_ =sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowerCamelCase_ =sigma.unsqueeze(-1 )
lowerCamelCase_ =original_samples + noise * sigma
return noisy_samples
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
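# A minimal, self-contained sketch of the schedule computed by `_convert_to_karras`
# above (Karras et al. 2022: interpolate between sigma_max and sigma_min in
# rho-warped space). Plain numpy; the readable names below are illustrative:
import numpy as np

def karras_sigmas(sigma_min: float, sigma_max: float, num_steps: int, rho: float = 7.0) -> np.ndarray:
    # ramp runs from 0 (the sigma_max end) to 1 (the sigma_min end)
    ramp = np.linspace(0, 1, num_steps)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho

# karras_sigmas(0.03, 14.6, 5) -> descending schedule from 14.6 down to 0.03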
| 363 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : str =['speech']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
class __UpperCamelCase ( metaclass=lowerCamelCase__ ):
lowercase : Any =['speech']
def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
| 6 | 0 |
import os
import numpy
import onnx
def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Dict:
    """simple docstring"""
    # Compare two TensorProtos while ignoring their names:
    # stash both names, blank them, compare, then restore.
    lowerCamelCase_ =a.name
    lowerCamelCase_ =b.name
    lowerCamelCase_ =''''''
    lowerCamelCase_ =''''''
    lowerCamelCase_ =a == b
    lowerCamelCase_ =name_a
    lowerCamelCase_ =name_b
    return res
def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(__snake_case , __snake_case )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , __snake_case , __snake_case )
_graph_replace_input_with(node_proto.attribute[1].g , __snake_case , __snake_case )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , __snake_case , __snake_case )
def a_ ( __snake_case : int , __snake_case : List[Any] , __snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
for n in graph_proto.node:
_node_replace_input_with(__snake_case , __snake_case , __snake_case )
def a_ ( __snake_case : List[Any] , __snake_case : Tuple , __snake_case : Optional[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ =list(model.graph.initializer )
lowerCamelCase_ =list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowerCamelCase_ =inits[i].name
lowerCamelCase_ =inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , __snake_case , __snake_case )
def a_ ( __snake_case : str ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =os.path.dirname(__snake_case )
lowerCamelCase_ =os.path.basename(__snake_case )
lowerCamelCase_ =onnx.load(os.path.join(__snake_case , __snake_case ) )
lowerCamelCase_ =list(model.graph.initializer )
lowerCamelCase_ =set()
lowerCamelCase_ ={}
lowerCamelCase_ =[]
lowerCamelCase_ =0
for i in range(len(__snake_case ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(__snake_case ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(__snake_case )
dup_set.add(__snake_case )
lowerCamelCase_ =inits[j].data_type
lowerCamelCase_ =numpy.prod(inits[j].dims )
                if dtype == 1:  # onnx.TensorProto.FLOAT: 4 bytes
                    mem_size *= 4
                elif dtype == 6:  # onnx.TensorProto.INT32: 4 bytes
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # onnx.TensorProto.INT64 / DOUBLE: 8 bytes
                    mem_size *= 8
else:
print('''unexpected data type: ''' , __snake_case )
total_reduced_size += mem_size
lowerCamelCase_ =inits[i].name
lowerCamelCase_ =inits[j].name
if name_i in dup_map:
dup_map[name_i].append(__snake_case )
else:
lowerCamelCase_ =[name_j]
ind_to_replace.append((j, i) )
print('''total reduced size: ''' , total_reduced_size / 1024 / 1024 / 1024 , '''GB''' )
lowerCamelCase_ =sorted(__snake_case )
_remove_dup_initializers_from_model(__snake_case , __snake_case , __snake_case )
lowerCamelCase_ ='''optimized_''' + model_file_name
lowerCamelCase_ =os.path.join(__snake_case , __snake_case )
onnx.save(__snake_case , __snake_case )
return new_model
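# How the deduplication entry point above would be driven; the readable name
# `remove_dup_initializers` is illustrative, since this sample's identifiers are mangled:
#
#   new_model_path = remove_dup_initializers("models/encoder.onnx")
#   optimized = onnx.load(new_model_path)  # duplicate initializers now shared
#
# The function writes "optimized_<input filename>" next to the input and returns that path.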
| 364 |
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[str] =['image_processor', 'tokenizer']
lowercase : Optional[int] ='AutoImageProcessor'
lowercase : List[str] ='AutoTokenizer'
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =self.image_processor
lowerCamelCase_ =False
def __call__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''images''', lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''text''', lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
lowerCamelCase_ =args[0]
lowerCamelCase_ =args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
lowerCamelCase_ =self.image_processor(lowerCAmelCase, *lowerCAmelCase, **lowerCAmelCase )
if text is not None:
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, **lowerCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCamelCase_ =encodings['''input_ids''']
return inputs
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@contextmanager
def lowercase__ ( self ):
"""simple docstring"""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your image inputs, or in a separate call).''' )
lowerCamelCase_ =True
lowerCamelCase_ =self.tokenizer
yield
lowerCamelCase_ =self.image_processor
lowerCamelCase_ =False
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False, lowerCAmelCase=None ):
"""simple docstring"""
if added_vocab is None:
lowerCamelCase_ =self.tokenizer.get_added_vocab()
lowerCamelCase_ ={}
while tokens:
lowerCamelCase_ =re.search(R'''<s_(.*?)>''', lowerCAmelCase, re.IGNORECASE )
if start_token is None:
break
lowerCamelCase_ =start_token.group(1 )
lowerCamelCase_ =re.search(Rf'''</s_{key}>''', lowerCAmelCase, re.IGNORECASE )
lowerCamelCase_ =start_token.group()
if end_token is None:
lowerCamelCase_ =tokens.replace(lowerCAmelCase, '''''' )
else:
lowerCamelCase_ =end_token.group()
lowerCamelCase_ =re.escape(lowerCAmelCase )
lowerCamelCase_ =re.escape(lowerCAmelCase )
lowerCamelCase_ =re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''', lowerCAmelCase, re.IGNORECASE )
if content is not None:
lowerCamelCase_ =content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowerCamelCase_ =self.tokenajson(lowerCAmelCase, is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase )
if value:
if len(lowerCAmelCase ) == 1:
lowerCamelCase_ =value[0]
lowerCamelCase_ =value
else: # leaf nodes
lowerCamelCase_ =[]
for leaf in content.split(R'''<sep/>''' ):
lowerCamelCase_ =leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowerCamelCase_ =leaf[1:-2] # for categorical special tokens
output[key].append(lowerCAmelCase )
if len(output[key] ) == 1:
lowerCamelCase_ =output[key][0]
lowerCamelCase_ =tokens[tokens.find(lowerCAmelCase ) + len(lowerCAmelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:], is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase )
if len(lowerCAmelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
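# The tag parser above converts Donut-style token streams into nested dicts.
# An illustrative round trip (the concrete schema is hypothetical; `tokenajson`
# is the method name as it appears in this sample):
#
#   tokens = "<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>"
#   processor.tokenajson(tokens)
#   # -> {"menu": {"name": "latte", "price": "4.50"}}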
| 6 | 0 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
a_ : str = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""
class __UpperCamelCase ( unittest.TestCase , lowerCamelCase__ ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =load_tool('''text-question-answering''' )
self.tool.setup()
lowerCamelCase_ =load_tool('''text-question-answering''', remote=lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.tool(lowerCAmelCase, '''What did Hugging Face do in April 2021?''' )
self.assertEqual(lowerCAmelCase, '''launched the BigScience Research Workshop''' )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.remote_tool(lowerCAmelCase, '''What did Hugging Face do in April 2021?''' )
self.assertEqual(lowerCAmelCase, '''launched the BigScience Research Workshop''' )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.tool(text=lowerCAmelCase, question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(lowerCAmelCase, '''launched the BigScience Research Workshop''' )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.remote_tool(text=lowerCAmelCase, question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(lowerCAmelCase, '''launched the BigScience Research Workshop''' )
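# Outside the test harness the tool is exercised the same way; the exact answer
# string depends on the underlying checkpoint, so treat it as indicative:
#
#   from transformers import load_tool
#   qa = load_tool("text-question-answering")
#   qa(text=TEXT, question="What did Hugging Face do in April 2021?")
#   # -> "launched the BigScience Research Workshop"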
| 365 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =ShapEImgaImgPipeline
lowercase : Dict =['image']
lowercase : str =['image']
lowercase : int =[
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowercase : int =False
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return 32
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self ):
"""simple docstring"""
return 8
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
lowerCamelCase_ =CLIPVisionModel(lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =CLIPImageProcessor(
crop_size=224, do_center_crop=lowerCAmelCase, do_normalize=lowerCAmelCase, do_resize=lowerCAmelCase, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=224, )
return image_processor
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
lowerCamelCase_ =PriorTransformer(**lowerCAmelCase )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ ={
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
lowerCamelCase_ =ShapERenderer(**lowerCAmelCase )
return model
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.dummy_prior
lowerCamelCase_ =self.dummy_image_encoder
lowerCamelCase_ =self.dummy_image_processor
lowerCamelCase_ =self.dummy_renderer
lowerCamelCase_ =HeunDiscreteScheduler(
beta_schedule='''exp''', num_train_timesteps=1_024, prediction_type='''sample''', use_karras_sigmas=lowerCAmelCase, clip_sample=lowerCAmelCase, clip_sample_range=1.0, )
lowerCamelCase_ ={
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
if str(lowerCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
else:
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCamelCase_ ={
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu'''
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =pipe(**self.get_dummy_inputs(lowerCAmelCase ) )
lowerCamelCase_ =output.images[0]
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowerCamelCase_ =np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =torch_device == '''cpu'''
lowerCamelCase_ =True
self._test_inference_batch_single_identical(
batch_size=2, test_max_difference=lowerCAmelCase, relax_max_difference=lowerCAmelCase, )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =self.pipeline_class(**lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =1
lowerCamelCase_ =2
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
lowerCamelCase_ =batch_size * [inputs[key]]
lowerCamelCase_ =pipe(**lowerCAmelCase, num_images_per_prompt=lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowerCamelCase_ =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowerCamelCase_ =ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
lowerCamelCase_ =pipe(
lowerCAmelCase, generator=lowerCAmelCase, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type='''np''', ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase )
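# Condensed from the slow test above — how the pipeline runs outside pytest
# (assumes a GPU and the pretrained weights are available):
#
#   pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img").to("cuda")
#   frames = pipe(image, guidance_scale=3.0, num_inference_steps=64,
#                 frame_size=64, output_type="np").images[0]  # shape (20, 64, 64, 3)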
| 6 | 0 |
from math import sqrt
def a_ ( __snake_case : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(__snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a_ ( __snake_case : int = 1_0001 ) -> int:
"""simple docstring"""
lowerCamelCase_ =0
lowerCamelCase_ =1
while count != nth and number < 3:
number += 1
if is_prime(__snake_case ):
count += 1
while count != nth:
number += 2
if is_prime(__snake_case ):
count += 1
return number
if __name__ == "__main__":
print(F"""{solution() = }""")
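    # Sanity checks: small cases are easy to verify by hand, and Project Euler 7
    # pins the default (the 10001st prime is 104743):
    assert solution(1) == 2 and solution(6) == 13
    assert solution() == 104743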
| 366 |
'''simple docstring'''
from itertools import product
def a_ ( __snake_case : int , __snake_case : int ) -> list[int]:
"""simple docstring"""
lowerCamelCase_ =sides_number
lowerCamelCase_ =max_face_number * dice_number
lowerCamelCase_ =[0] * (max_total + 1)
lowerCamelCase_ =1
lowerCamelCase_ =range(__snake_case , max_face_number + 1 )
for dice_numbers in product(__snake_case , repeat=__snake_case ):
lowerCamelCase_ =sum(__snake_case )
totals_frequencies[total] += 1
return totals_frequencies
def a_ ( ) -> float:
"""simple docstring"""
lowerCamelCase_ =total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowerCamelCase_ =total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowerCamelCase_ =0
lowerCamelCase_ =9
lowerCamelCase_ =4 * 9
lowerCamelCase_ =6
for peter_total in range(__snake_case , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowerCamelCase_ =(4**9) * (6**6)
lowerCamelCase_ =peter_wins_count / total_games_number
lowerCamelCase_ =round(__snake_case , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
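    # Regression check against Project Euler 205's published answer, which
    # matches the 7-digit rounding above:
    assert solution() == 0.5731441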
| 6 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a_ : List[str] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class __UpperCamelCase :
lowercase : Optional[str] =field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
lowercase : Optional[str] =field(
default=lowerCamelCase__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowercase : Optional[str] =field(
default=lowerCamelCase__ , metadata={'help': 'The column name of the images in the files.'} )
lowercase : Optional[str] =field(default=lowerCamelCase__ , metadata={'help': 'A folder containing the training data.'} )
lowercase : Optional[str] =field(default=lowerCamelCase__ , metadata={'help': 'A folder containing the validation data.'} )
lowercase : Optional[float] =field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
lowercase : Optional[int] =field(
default=lowerCamelCase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowercase : Optional[int] =field(
default=lowerCamelCase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={}
if self.train_dir is not None:
lowerCamelCase_ =self.train_dir
if self.validation_dir is not None:
lowerCamelCase_ =self.validation_dir
lowerCamelCase_ =data_files if data_files else None
@dataclass
class __UpperCamelCase :
lowercase : str =field(
default=lowerCamelCase__ , metadata={
'help': (
            'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
)
} , )
lowercase : Optional[str] =field(
default=lowerCamelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
lowercase : Optional[str] =field(
default=lowerCamelCase__ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
lowercase : Optional[str] =field(
default=lowerCamelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
lowercase : str =field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowercase : str =field(default=lowerCamelCase__ , metadata={'help': 'Name or path of preprocessor config.'} )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowercase : float =field(
default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : float =field(
default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def a_ ( __snake_case : List[str] ) -> int:
"""simple docstring"""
lowerCamelCase_ =torch.stack([example['''pixel_values'''] for example in examples] )
return {"pixel_values": pixel_values}
def a_ ( ) -> str:
"""simple docstring"""
lowerCamelCase_ =HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mae''' , __snake_case , __snake_case )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ =training_args.get_process_log_level()
logger.setLevel(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bit training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowerCamelCase_ =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
lowerCamelCase_ =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowerCamelCase_ =None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0:
lowerCamelCase_ =ds['''train'''].train_test_split(data_args.train_val_split )
lowerCamelCase_ =split['''train''']
lowerCamelCase_ =split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ ={
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowerCamelCase_ =ViTMAEConfig.from_pretrained(model_args.config_name , **__snake_case )
elif model_args.model_name_or_path:
lowerCamelCase_ =ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__snake_case )
else:
lowerCamelCase_ =ViTMAEConfig()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# adapt config
config.update(
{
'''mask_ratio''': model_args.mask_ratio,
'''norm_pix_loss''': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
lowerCamelCase_ =ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__snake_case )
elif model_args.model_name_or_path:
lowerCamelCase_ =ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__snake_case )
else:
lowerCamelCase_ =ViTImageProcessor()
# create model
if model_args.model_name_or_path:
lowerCamelCase_ =ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('''Training new model from scratch''' )
lowerCamelCase_ =ViTMAEForPreTraining(__snake_case )
if training_args.do_train:
lowerCamelCase_ =ds['''train'''].column_names
else:
lowerCamelCase_ =ds['''validation'''].column_names
if data_args.image_column_name is not None:
lowerCamelCase_ =data_args.image_column_name
elif "image" in column_names:
lowerCamelCase_ ='''image'''
elif "img" in column_names:
lowerCamelCase_ ='''img'''
else:
lowerCamelCase_ =column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowerCamelCase_ =image_processor.size['''shortest_edge''']
else:
lowerCamelCase_ =(image_processor.size['''height'''], image_processor.size['''width'''])
lowerCamelCase_ =Compose(
[
            Lambda(lambda img: img.convert('''RGB''' ) if img.mode != "RGB" else img ),
RandomResizedCrop(__snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(__snake_case : List[Any] ):
lowerCamelCase_ =[transforms(__snake_case ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
lowerCamelCase_ =ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__snake_case )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
lowerCamelCase_ =(
ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__snake_case )
# Compute absolute learning rate
lowerCamelCase_ =(
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
lowerCamelCase_ =training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
lowerCamelCase_ =Trainer(
model=__snake_case , args=__snake_case , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , )
# Training
if training_args.do_train:
lowerCamelCase_ =None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ =last_checkpoint
lowerCamelCase_ =trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCamelCase_ =trainer.evaluate()
trainer.log_metrics('''eval''' , __snake_case )
trainer.save_metrics('''eval''' , __snake_case )
# Write model card and (optionally) push to hub
lowerCamelCase_ ={
'''tasks''': '''masked-auto-encoding''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-auto-encoding'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__snake_case )
else:
trainer.create_model_card(**__snake_case )
def a_ ( __snake_case : Optional[int] ) -> List[Any]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
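# A representative launch, using only flags defined by the dataclasses above
# (paths and hyperparameter values are illustrative):
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 \
#       --norm_pix_loss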
| 367 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
a_ : Tuple = TypeVar("""T""")
a_ : Dict = Union[List[T], Tuple[T, ...]]
a_ : int = Union[T, List[T], Dict[str, T]]
a_ : Optional[Any] = Union[str, bytes, os.PathLike]
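# The three aliases above admit inputs such as:
#   "data/train.csv"                         # str / bytes / os.PathLike
#   ["a.csv", "b.csv"]                       # List[T] / Tuple[T, ...]
#   {"train": "a.csv", "test": "b.csv"}      # T, List[T] or Dict[str, T]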
| 6 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
a_ : Optional[int] = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class __UpperCamelCase ( unittest.TestCase ):
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
lowerCamelCase_ =TOKEN
HfFolder.save_token(lowerCAmelCase )
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token, repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
lowerCamelCase_ =FlaxBertModel(lowerCAmelCase )
model.push_to_hub('''test-model-flax''', use_auth_token=self._token )
lowerCamelCase_ =FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
lowerCamelCase_ =flatten_dict(unfreeze(model.params ) )
lowerCamelCase_ =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCamelCase_ =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase, 1e-3, msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token, repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCAmelCase, repo_id='''test-model-flax''', push_to_hub=lowerCAmelCase, use_auth_token=self._token )
lowerCamelCase_ =FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
lowerCamelCase_ =flatten_dict(unfreeze(model.params ) )
lowerCamelCase_ =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCamelCase_ =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase, 1e-3, msg=f'''{key} not identical''' )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
lowerCamelCase_ =FlaxBertModel(lowerCAmelCase )
model.push_to_hub('''valid_org/test-model-flax-org''', use_auth_token=self._token )
lowerCamelCase_ =FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCamelCase_ =flatten_dict(unfreeze(model.params ) )
lowerCamelCase_ =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCamelCase_ =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase, 1e-3, msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token, repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowerCAmelCase, repo_id='''valid_org/test-model-flax-org''', push_to_hub=lowerCAmelCase, use_auth_token=self._token )
lowerCamelCase_ =FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
lowerCamelCase_ =flatten_dict(unfreeze(model.params ) )
lowerCamelCase_ =flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
lowerCamelCase_ =(base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCAmelCase, 1e-3, msg=f'''{key} not identical''' )
def a_ ( __snake_case : List[str] , __snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =True
lowerCamelCase_ =flatten_dict(modela.params )
lowerCamelCase_ =flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
lowerCamelCase_ =False
return models_are_equal
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCamelCase_ =FlaxBertModel(lowerCAmelCase )
lowerCamelCase_ ='''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCAmelCase, lowerCAmelCase ) )
with self.assertRaises(lowerCAmelCase ):
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase, subfolder=lowerCAmelCase )
self.assertTrue(check_models_equal(lowerCAmelCase, lowerCAmelCase ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
lowerCamelCase_ =FlaxBertModel(lowerCAmelCase )
lowerCamelCase_ ='''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCAmelCase, lowerCAmelCase ), max_shard_size='''10KB''' )
with self.assertRaises(lowerCAmelCase ):
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase, subfolder=lowerCAmelCase )
self.assertTrue(check_models_equal(lowerCAmelCase, lowerCAmelCase ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''bert'''
lowerCamelCase_ ='''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(lowerCAmelCase ):
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase, subfolder=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''bert'''
lowerCamelCase_ ='''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(lowerCAmelCase ):
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =FlaxBertModel.from_pretrained(lowerCAmelCase, subfolder=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
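# The param-diff checks above rely on flax's flatten_dict turning nested
# pytrees into flat dicts keyed by tuples — a toy reproduction:
#
#   from flax.traverse_util import flatten_dict
#   flatten_dict({"encoder": {"dense": {"kernel": 0}}})
#   # -> {("encoder", "dense", "kernel"): 0}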
| 368 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def a_ ( __snake_case : str , __snake_case : bool = True , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : float = math.inf , __snake_case : float = -math.inf , __snake_case : bool = False , __snake_case : float = 100 , __snake_case : float = 0.0_1 , __snake_case : float = 1 , ) -> Any:
"""simple docstring"""
lowerCamelCase_ =False
lowerCamelCase_ =search_prob
lowerCamelCase_ =start_temperate
lowerCamelCase_ =[]
lowerCamelCase_ =0
lowerCamelCase_ =None
while not search_end:
lowerCamelCase_ =current_state.score()
if best_state is None or current_score > best_state.score():
lowerCamelCase_ =current_state
scores.append(__snake_case )
iterations += 1
lowerCamelCase_ =None
lowerCamelCase_ =current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
lowerCamelCase_ =random.randint(0 , len(__snake_case ) - 1 ) # picking a random neighbor
lowerCamelCase_ =neighbors.pop(__snake_case )
lowerCamelCase_ =picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCamelCase_ =change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCamelCase_ =picked_neighbor
else:
lowerCamelCase_ =(math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowerCamelCase_ =picked_neighbor
lowerCamelCase_ =current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCamelCase_ =True
else:
lowerCamelCase_ =next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__snake_case ) , __snake_case )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def a_ ( __snake_case : List[str] , __snake_case : Optional[int] ) -> str:
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
    """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
    F"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
a_ : str = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a_ : List[str] = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
    """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
    F"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}"""
)
def a_ ( __snake_case : Dict , __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return (3 * x**2) - (6 * y)
a_ : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
    """The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
F"""{local_min.score()}"""
)
a_ : Dict = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a_ : Optional[int] = simulated_annealing(prob, find_max=True, visualization=True)
print(
    """The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
F"""{local_min.score()}"""
)
| 6 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Union[str, Any] = logging.get_logger(__name__)
a_ : Optional[int] = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Tuple ='xlm-roberta'
def __init__( self, lowerCAmelCase=30_522, lowerCAmelCase=768, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3_072, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-12, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase="absolute", lowerCAmelCase=True, lowerCAmelCase=None, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_act
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =position_embedding_type
lowerCamelCase_ =use_cache
lowerCamelCase_ =classifier_dropout
class __UpperCamelCase ( lowerCamelCase__ ):
@property
def lowercase__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCamelCase_ ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase_ ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
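# Standard config-API usage for the class above (in the transformers library
# this is XLMRobertaConfig; the sample's own class names are mangled):
#
#   config = XLMRobertaConfig.from_pretrained("xlm-roberta-base")
#   config.num_hidden_layers  # 12 by default, per the __init__ signature above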
| 369 |
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def a_ ( __snake_case : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =[
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__snake_case , __snake_case )
def a_ ( __snake_case : List[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_, lowerCamelCase_ =emb.weight.shape
lowerCamelCase_ =nn.Linear(__snake_case , __snake_case , bias=__snake_case )
lowerCamelCase_ =emb.weight.data
return lin_layer
def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple=None ) -> Dict:
"""simple docstring"""
lowerCamelCase_ ={}
for old_key in state_dict.keys():
lowerCamelCase_ =old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowerCamelCase_ =key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' )
else:
lowerCamelCase_ =key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
if "gate" in key:
lowerCamelCase_ =key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
        if "fc2" in key and "experts" not in key:
            lowerCamelCase_ =key.replace('''.fc2.''' , '''.ffn.fc2.''' )
        if "fc1" in key and "experts" not in key:
            lowerCamelCase_ =key.replace('''.fc1.''' , '''.ffn.fc1.''' )
if ".encoder_attn." in key:
lowerCamelCase_ =key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
lowerCamelCase_ =key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
lowerCamelCase_ =key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
lowerCamelCase_ =state_dict[old_key]
return new_dict
def a_ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : str = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
lowerCamelCase_ =[]
lowerCamelCase_ =0
os.makedirs(__snake_case , exist_ok=__snake_case )
for expert in range(__snake_case ):
lowerCamelCase_ =switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(__snake_case ):
lowerCamelCase_ =torch.load(__snake_case )['''model''']
remove_ignore_keys_(__snake_case )
lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
lowerCamelCase_ =os.path.join(
__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
torch.save(__snake_case , __snake_case )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__snake_case )[0]].dtype )
# Add the last block
lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
lowerCamelCase_ =torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(__snake_case )
lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
lowerCamelCase_ =shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__snake_case ) == 1:
lowerCamelCase_ =os.path.join(__snake_case , __snake_case )
torch.save(__snake_case , __snake_case )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__snake_case , __snake_case )
# Otherwise, let's build the index
lowerCamelCase_ ={}
for idx, shard in enumerate(__snake_case ):
lowerCamelCase_ =weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(__snake_case ):05d}.bin''' )
lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) )
for key in shard:
lowerCamelCase_ =shard_file
# Add the metadata
lowerCamelCase_ ={'''total_size''': total_size}
lowerCamelCase_ ={'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(__snake_case , __snake_case ) , '''w''' , encoding='''utf-8''' ) as f:
lowerCamelCase_ =json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n'''
f.write(__snake_case )
return metadata, index
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
a_ : Tuple = parser.parse_args()
a_ , a_ : int = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
a_ : Tuple = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
a_ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
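    # Illustrative invocation (the script filename is hypothetical; flags and
    # default paths come from the argparse setup above):
    #
    #   python convert_nllb_moe_to_hf.py \
    #       --nllb_moe_checkpoint_path /weights/checkpoints/model_moe_54b/checkpoint_2_300000 \
    #       --pytorch_dump_folder_path /weights/checkpoints/hf-converted-moe-54b \
    #       --dtype float32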
| 6 | 0 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features  # noqa: F811 — the two processor classes are the V1 and V2 SQuAD processors
a_ : List[str] = logging.get_logger(__name__)
a_ : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
a_ : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __UpperCamelCase :
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'Model type selected in the list: ' + ', '.join(lowerCamelCase__ )} )
lowercase : str =field(
default=lowerCamelCase__ , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
lowercase : int =field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase : int =field(
default=1_28 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
lowercase : int =field(
default=64 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
lowercase : int =field(
default=30 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowercase : bool =field(
default=lowerCamelCase__ , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
lowercase : float =field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    lowercase : int =field(
        default=20 , metadata={'help': 'The total number of n-best predictions to generate.'} )
lowercase : int =field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
lowercase : int =field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[Any] ='train'
lowercase : Any ='dev'
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : SquadDataTrainingArguments
lowercase : List[SquadFeatures]
lowercase : Split
lowercase : bool
def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = Split.train, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = "pt", ):
"""simple docstring"""
lowerCamelCase_ =args
lowerCamelCase_ =is_language_sensitive
        lowerCamelCase_ =SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(lowerCAmelCase, lowerCAmelCase ):
try:
lowerCamelCase_ =Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
lowerCamelCase_ =mode
# Load data features from cache or dataset file
lowerCamelCase_ ='''v2''' if args.version_2_with_negative else '''v1'''
lowerCamelCase_ =os.path.join(
cache_dir if cache_dir is not None else args.data_dir, f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ =cached_features_file + '''.lock'''
with FileLock(lowerCAmelCase ):
if os.path.exists(lowerCAmelCase ) and not args.overwrite_cache:
lowerCamelCase_ =time.time()
lowerCamelCase_ =torch.load(lowerCAmelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
lowerCamelCase_ =self.old_features['''features''']
lowerCamelCase_ =self.old_features.get('''dataset''', lowerCAmelCase )
lowerCamelCase_ =self.old_features.get('''examples''', lowerCAmelCase )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
''' future run''' )
else:
if mode == Split.dev:
lowerCamelCase_ =self.processor.get_dev_examples(args.data_dir )
else:
lowerCamelCase_ =self.processor.get_train_examples(args.data_dir )
lowerCamelCase_, lowerCamelCase_ =squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowerCAmelCase, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowerCAmelCase, )
lowerCamelCase_ =time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples}, lowerCAmelCase, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =self.features[i]
lowerCamelCase_ =torch.tensor(feature.input_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.attention_mask, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.token_type_ids, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.cls_index, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.p_mask, dtype=torch.float )
lowerCamelCase_ =torch.tensor(feature.is_impossible, dtype=torch.float )
lowerCamelCase_ ={
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
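        # Prune or extend model-specific inputs: some architectures do not use
        # token_type_ids, while XLNet-style models expect cls_index/p_mask as well.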
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
            inputs.update({'''langs''': (torch.ones(input_ids.shape, dtype=torch.int64 ) * self.args.lang_id)} )
if self.mode == Split.train:
lowerCamelCase_ =torch.tensor(feature.start_position, dtype=torch.long )
lowerCamelCase_ =torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
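# Minimal usage sketch (hedged: the upstream names `SquadDataset` and the checkpoint
# and data paths below are illustrative assumptions mirroring the classes defined
# above, not quotations from this module):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   train_dataset = SquadDataset(args, tokenizer=tokenizer, mode=Split.train)
#   inputs = train_dataset[0]  # dict of input_ids / attention_mask / ...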
| 370 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : int =['image_processor', 'tokenizer']
lowercase : int ='LayoutLMv2ImageProcessor'
lowercase : Any =('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = True, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=lowerCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =[text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase_ =features['''words''']
lowerCamelCase_ =self.tokenizer(
text=text if text is not None else features['''words'''], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features['''boxes'''], word_labels=lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, stride=lowerCAmelCase, pad_to_multiple_of=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_overflowing_tokens=lowerCAmelCase, return_special_tokens_mask=lowerCAmelCase, return_offsets_mapping=lowerCAmelCase, return_length=lowerCAmelCase, verbose=lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, )
# add pixel values
lowerCamelCase_ =features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
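            # every overflowing chunk must be paired with the pixel values of the
            # source image it was split from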
lowerCamelCase_ =self.get_overflowing_images(lowerCAmelCase, encoded_inputs['''overflow_to_sample_mapping'''] )
lowerCamelCase_ =images
return encoded_inputs
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(lowerCAmelCase )} and {len(lowerCAmelCase )}''' )
return images_with_overflow
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@property
def lowercase__ ( self ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
| 6 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ : List[str] = logging.get_logger(__name__)
a_ : str = {"""tokenizer_file""": """tokenizer.json"""}
a_ : Dict = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[Any] =VOCAB_FILES_NAMES
lowercase : Tuple =PRETRAINED_VOCAB_FILES_MAP
lowercase : List[str] =['input_ids', 'attention_mask']
lowercase : List[str] =None
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase="<unk>", lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="<pad>", lowerCAmelCase=False, lowerCAmelCase=False, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(
lowerCAmelCase, lowerCAmelCase, tokenizer_file=lowerCAmelCase, unk_token=lowerCAmelCase, bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, pad_token=lowerCAmelCase, add_prefix_space=lowerCAmelCase, clean_up_tokenization_spaces=lowerCAmelCase, **lowerCAmelCase, )
lowerCamelCase_ =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''', lowerCAmelCase ) != add_prefix_space:
lowerCamelCase_ =getattr(lowerCAmelCase, pre_tok_state.pop('''type''' ) )
lowerCamelCase_ =add_prefix_space
lowerCamelCase_ =pre_tok_class(**lowerCAmelCase )
lowerCamelCase_ =add_prefix_space
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =kwargs.get('''is_split_into_words''', lowerCAmelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._batch_encode_plus(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =kwargs.get('''is_split_into_words''', lowerCAmelCase )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._encode_plus(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
"""simple docstring"""
lowerCamelCase_ =self._tokenizer.model.save(lowerCAmelCase, name=lowerCAmelCase )
return tuple(lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase ) + [self.eos_token_id] )
if len(lowerCAmelCase ) > self.model_max_length:
lowerCamelCase_ =input_ids[-self.model_max_length :]
return input_ids
| 371 |
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : Union[str, Any] =VQModel
lowercase : Union[str, Any] ='sample'
@property
def lowercase__ ( self, lowerCAmelCase=(32, 32) ):
"""simple docstring"""
lowerCamelCase_ =4
lowerCamelCase_ =3
        lowerCamelCase_ =floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
return {"sample": image}
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowercase__ ( self ):
"""simple docstring"""
return (3, 32, 32)
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ={
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
lowerCamelCase_ =self.dummy_input
return init_dict, inputs_dict
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
        lowerCamelCase_, lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''', output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['''missing_keys'''] ), 0 )
        model.to(torch_device )
        lowerCamelCase_ =model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =VQModel.from_pretrained('''fusing/vqgan-dummy''' )
        model.to(torch_device ).eval()
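        # seed CPU and (when available) CUDA RNGs so the random latent below is reproducible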
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
lowerCamelCase_ =torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size )
        lowerCamelCase_ =image.to(torch_device )
with torch.no_grad():
            lowerCamelCase_ =model(image ).sample
lowerCamelCase_ =output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCamelCase_ =torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3] )
# fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3 ) )
| 6 | 0 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __UpperCamelCase :
@staticmethod
def lowercase__ ( *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
lowercase : Dict =MODEL_FOR_OBJECT_DETECTION_MAPPING
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =ObjectDetectionPipeline(model=lowerCAmelCase, image_processor=lowerCAmelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''', threshold=0.0 )
self.assertGreater(len(lowerCAmelCase ), 0 )
for detected_object in outputs:
self.assertEqual(
lowerCAmelCase, {
'''score''': ANY(lowerCAmelCase ),
'''label''': ANY(lowerCAmelCase ),
'''box''': {'''xmin''': ANY(lowerCAmelCase ), '''ymin''': ANY(lowerCAmelCase ), '''xmax''': ANY(lowerCAmelCase ), '''ymax''': ANY(lowerCAmelCase )},
}, )
import datasets
lowerCamelCase_ =datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''', '''image''', split='''test''' )
lowerCamelCase_ =[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
]
lowerCamelCase_ =object_detector(lowerCAmelCase, threshold=0.0 )
self.assertEqual(len(lowerCAmelCase ), len(lowerCAmelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(lowerCAmelCase ), 0 )
for detected_object in outputs:
self.assertEqual(
lowerCAmelCase, {
'''score''': ANY(lowerCAmelCase ),
'''label''': ANY(lowerCAmelCase ),
'''box''': {'''xmin''': ANY(lowerCAmelCase ), '''ymin''': ANY(lowerCAmelCase ), '''xmax''': ANY(lowerCAmelCase ), '''ymax''': ANY(lowerCAmelCase )},
}, )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
def lowercase__ ( self ):
"""simple docstring"""
pass
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''hf-internal-testing/tiny-detr-mobilenetsv3'''
lowerCamelCase_ =AutoModelForObjectDetection.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =ObjectDetectionPipeline(model=lowerCAmelCase, feature_extractor=lowerCAmelCase )
lowerCamelCase_ =object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''', threshold=0.0 )
self.assertEqual(
nested_simplify(lowerCAmelCase, decimals=4 ), [
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
], )
lowerCamelCase_ =object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
], threshold=0.0, )
self.assertEqual(
nested_simplify(lowerCAmelCase, decimals=4 ), [
[
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
[
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.3_3_7_6, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
], )
@require_torch
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''facebook/detr-resnet-50'''
lowerCamelCase_ =AutoModelForObjectDetection.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
lowerCamelCase_ =ObjectDetectionPipeline(model=lowerCAmelCase, feature_extractor=lowerCAmelCase )
lowerCamelCase_ =object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(lowerCAmelCase, decimals=4 ), [
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
], )
lowerCamelCase_ =object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(lowerCAmelCase, decimals=4 ), [
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
], )
@require_torch
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''facebook/detr-resnet-50'''
lowerCamelCase_ =pipeline('''object-detection''', model=lowerCAmelCase )
lowerCamelCase_ =object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(lowerCAmelCase, decimals=4 ), [
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
], )
lowerCamelCase_ =object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(lowerCAmelCase, decimals=4 ), [
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.9_9_8_2, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.9_9_6_0, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.9_9_5_5, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
], )
@require_torch
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =0.9_9_8_5
lowerCamelCase_ ='''facebook/detr-resnet-50'''
lowerCamelCase_ =pipeline('''object-detection''', model=lowerCAmelCase )
lowerCamelCase_ =object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''', threshold=lowerCAmelCase )
self.assertEqual(
nested_simplify(lowerCAmelCase, decimals=4 ), [
{'''score''': 0.9_9_8_8, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.9_9_8_7, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
], )
@require_torch
@require_pytesseract
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''Narsil/layoutlmv3-finetuned-funsd'''
lowerCamelCase_ =0.9_9_9_3
lowerCamelCase_ =pipeline('''object-detection''', model=lowerCAmelCase, threshold=lowerCAmelCase )
lowerCamelCase_ =object_detector(
'''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
self.assertEqual(
nested_simplify(lowerCAmelCase, decimals=4 ), [
{'''score''': 0.9_9_9_3, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
{'''score''': 0.9_9_9_3, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
], )
| 350 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
a_ : List[Any] = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
a_ : List[Any] = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
a_ : Any = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def lowercase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ), codebase_urls=['''https://www.atticusprojectai.org/cuad'''], reference_urls=['''https://www.atticusprojectai.org/cuad'''], )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ ={prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
lowerCamelCase_ =[
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
lowerCamelCase_ =evaluate(dataset=lowerCAmelCase, predictions=lowerCAmelCase )
return score
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
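# The import structure is declared up front so that `_LazyModule` (constructed at the
# bottom of this file) can defer the heavy torch/tf/flax imports until first access.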
a_ : Optional[int] = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : str = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 351 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
a_ : Tuple = logging.get_logger(__name__)
a_ : int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
a_ : Tuple = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : Union[str, Any] = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : str = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
a_ : int = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_12,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_12,
}
a_ : List[Any] = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_12,
"""facebook/dpr-question_encoder-multiset-base""": 5_12,
}
a_ : Optional[Any] = {
"""facebook/dpr-reader-single-nq-base""": 5_12,
"""facebook/dpr-reader-multiset-base""": 5_12,
}
a_ : Optional[int] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
a_ : List[str] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
a_ : Dict = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[Any] =VOCAB_FILES_NAMES
lowercase : Any =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase : Dict =DPRContextEncoderTokenizer
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[int] =VOCAB_FILES_NAMES
lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[Any] =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowercase : List[Any] =DPRQuestionEncoderTokenizer
a_ : Union[str, Any] = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
a_ : Dict = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
a_ : Dict = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
            is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
          - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output a batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(lowerCamelCase__ )
class __UpperCamelCase :
def __call__( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, **lowerCAmelCase, ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
elif titles is None or texts is None:
lowerCamelCase_ =titles if texts is None else texts
return super().__call__(
lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase, return_attention_mask=lowerCAmelCase, **lowerCAmelCase, )
lowerCamelCase_ =titles if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [titles]
lowerCamelCase_ =texts if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [texts]
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =questions if not isinstance(lowerCAmelCase, lowerCAmelCase ) else [questions] * n_passages
assert len(lowerCAmelCase ) == len(
            lowerCAmelCase ), f'''There should be as many titles as texts but got {len(lowerCAmelCase )} titles and {len(lowerCAmelCase )} texts.'''
lowerCamelCase_ =super().__call__(lowerCAmelCase, lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ =super().__call__(lowerCAmelCase, add_special_tokens=lowerCAmelCase, padding=lowerCAmelCase, truncation=lowerCAmelCase )['''input_ids''']
lowerCamelCase_ ={
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase, lowerCAmelCase )
]
}
if return_attention_mask is not False:
lowerCamelCase_ =[]
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
lowerCamelCase_ =attention_mask
return self.pad(lowerCAmelCase, padding=lowerCAmelCase, max_length=lowerCAmelCase, return_tensors=lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 16, lowerCAmelCase = 64, lowerCAmelCase = 4, ):
"""simple docstring"""
lowerCamelCase_ =reader_input['''input_ids''']
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =reader_output[:3]
lowerCamelCase_ =len(lowerCAmelCase )
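        # rank passages from most to least relevant before extracting answer spans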
lowerCamelCase_ =sorted(range(lowerCAmelCase ), reverse=lowerCAmelCase, key=relevance_logits.__getitem__ )
lowerCamelCase_ =[]
for doc_id in sorted_docs:
lowerCamelCase_ =list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
lowerCamelCase_ =sequence_ids.index(self.sep_token_id, 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCamelCase_ =sequence_ids.index(self.pad_token_id )
else:
lowerCamelCase_ =len(lowerCAmelCase )
lowerCamelCase_ =self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCAmelCase, top_spans=lowerCAmelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCAmelCase, start_index=lowerCAmelCase, end_index=lowerCAmelCase, text=self.decode(sequence_ids[start_index : end_index + 1] ), ) )
if len(lowerCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =[]
for start_index, start_score in enumerate(lowerCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
lowerCamelCase_ =sorted(lowerCAmelCase, key=lambda lowerCAmelCase : x[1], reverse=lowerCAmelCase )
lowerCamelCase_ =[]
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
lowerCamelCase_ =end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
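            # skip any candidate that overlaps a previously selected (higher-scoring) span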
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(lowerCAmelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
lowercase : int =VOCAB_FILES_NAMES
lowercase : Tuple =READER_PRETRAINED_VOCAB_FILES_MAP
lowercase : Tuple =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] =READER_PRETRAINED_INIT_CONFIGURATION
lowercase : int =['input_ids', 'attention_mask']
lowercase : Dict =DPRReaderTokenizer
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a_ : int = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = ["""CLIPFeatureExtractor"""]
a_ : List[str] = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[Any] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
a_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 352 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def a_ ( ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ ={
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
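    # the first two files repeat the same token and should form one duplicate
    # cluster; the third file is distinct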
lowerCamelCase_ =Dataset.from_dict(__snake_case )
return dataset
class __UpperCamelCase ( lowerCamelCase__ ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =get_dataset()
lowerCamelCase_ =make_duplicate_clusters(lowerCAmelCase, 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ), 2 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =get_dataset()
lowerCamelCase_, lowerCamelCase_ =deduplicate_dataset(lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ), 2 )
print(lowerCAmelCase )
self.assertEqual(duplicate_clusters[0][0]['''copies'''], 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''], lowerCAmelCase )
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : List[str] = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
a_ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a_ : Any = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 6 | 0 |
'''simple docstring'''
import unittest
import numpy as np
def a_ ( __snake_case : np.ndarray , __snake_case : np.ndarray , __snake_case : np.ndarray , __snake_case : np.ndarray | None = None , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ =np.shape(__snake_case )
lowerCamelCase_ =np.shape(__snake_case )
lowerCamelCase_ =np.shape(__snake_case )
if shape_a[0] != shape_b[0]:
lowerCamelCase_ =(
'''Expected the same number of rows for A and B. '''
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(__snake_case )
if shape_b[1] != shape_c[1]:
lowerCamelCase_ =(
'''Expected the same number of columns for B and C. '''
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(__snake_case )
lowerCamelCase_ =pseudo_inv
if a_inv is None:
try:
lowerCamelCase_ =np.linalg.inv(__snake_case )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
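# For the symmetric block matrix M = [[A, B], [B.T, C]] with invertible A, the Schur
# complement S = C - B.T @ A^{-1} @ B satisfies det(M) = det(A) * det(S); the first
# test below verifies exactly this identity.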
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCamelCase_ =np.array([[0, 3], [3, 0], [2, 3]] )
lowerCamelCase_ =np.array([[2, 1], [6, 3]] )
lowerCamelCase_ =schur_complement(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =np.block([[a, b], [b.T, c]] )
lowerCamelCase_ =np.linalg.det(lowerCAmelCase )
lowerCamelCase_ =np.linalg.det(lowerCAmelCase )
lowerCamelCase_ =np.linalg.det(lowerCAmelCase )
self.assertAlmostEqual(lowerCAmelCase, det_a * det_s )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCamelCase_ =np.array([[0, 3], [3, 0], [2, 3]] )
lowerCamelCase_ =np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowerCAmelCase ):
schur_complement(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowerCamelCase_ =np.array([[0, 3], [3, 0], [2, 3]] )
lowerCamelCase_ =np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowerCAmelCase ):
schur_complement(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 354 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 150_0000) -> int:
    """Count perimeters below `limit` that admit exactly one integer right triangle."""
    frequencies = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
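# Worked example (illustrative): every Pythagorean triple is
# k * (m**2 - n**2, 2*m*n, m**2 + n**2) with m > n, gcd(m, n) == 1 and m, n of
# opposite parity, so the primitive perimeter is 2*m*(m + n). For m=2, n=1 this
# gives 2*2*(2+1) = 12 (the 3-4-5 triangle), and the loop above increments
# frequencies[12], frequencies[24], frequencies[36], ... for all its multiples.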
if __name__ == "__main__":
print(F"""{solution() = }""")
| 6 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast
a_ : int = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mt5"""] = [
        """MT5EncoderModel""",
        """MT5ForConditionalGeneration""",
        """MT5ForQuestionAnswering""",
        """MT5Model""",
        """MT5PreTrainedModel""",
        """MT5Stack""",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["""__file__"""],
        _import_structure,
        extra_objects={"""MT5Tokenizer""": MT5Tokenizer, """MT5TokenizerFast""": MT5TokenizerFast},
        module_spec=__spec__,
    )
| 355 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create the GLUE MRPC train/eval dataloaders for the given batch size."""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
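# Illustration of collate_fn above (hypothetical numbers): under fp16 mixed
# precision, pad_to_multiple_of=8, so a batch whose longest sequence has 121
# tokens is padded up to 128 -- tensor-core kernels are fastest when the
# sequence dimension is a multiple of 8 (16 under fp8).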
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train bert-base-cased on MRPC, automatically shrinking the batch size on OOM."""
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
        config['''num_epochs'''] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    metric = evaluate.load('''glue''', '''mrpc''')

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
                metric.add_batch(
                    predictions=predictions, references=references, )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F'''epoch {epoch}:''', eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
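# Behavior sketch of the decorator above (assuming `find_executable_batch_size`
# from `accelerate.utils`): the wrapped `inner_training_loop` is first run with
# `starting_batch_size`; if a CUDA out-of-memory error escapes, memory is freed
# and the loop is retried with the batch size halved (e.g. 16 -> 8 -> 4 -> ...)
# until it completes, or an error is raised once the batch size reaches zero.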
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''', type=str, default=None, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''', )
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''')
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 6 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(conv2)
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
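# Shape sketch (illustrative, with the default block_out_channels=(16, 32, 96, 256)):
# a (1, H, W, 3) conditioning image maps to (1, H, W, 16) after conv_in; each of
# the three conv pairs in `blocks` then halves the spatial size once while
# widening the channels, giving (1, H/8, W/8, 256) before conv_out projects to
# `conditioning_embedding_channels` -- the 8x downsampling matches the UNet latent.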
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
        controlnet_down_blocks.append(controlnet_block)
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(down_block)
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(controlnet_block)
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample)
| 356 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
a_ : List[str] = """src/diffusers"""
# Matches is_xxx_available()
_re_backend = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
_re_single_line_import = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
a_ : Optional[Any] = """
{0} = None
"""
a_ : List[Any] = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
a_ : Optional[Any] = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, '''__init__.py'''), '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()
    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith('''else:'''):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', '''))
                elif line.startswith(''' ''' * 8):
                    objects.append(line[8:-2])
                line_index += 1
            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1
    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
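# Example output (illustrative): create_dummy_object("UNet2DModel", '["torch"]')
# takes the DUMMY_CLASS branch (mixed case) and renders a placeholder class:
#
#   class UNet2DModel(metaclass=DummyObject):
#       _backends = ["torch"]
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])
#       ...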
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For each backend, build the content of the corresponding dummy module.
    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = '''[''' + ''', '''.join(F'''"{b}"''' for b in backend.split('''_and_''')) + ''']'''
        dummy_file = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n'''
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
def check_dummies(overwrite=False):
    """Check that the dummy files are up to date; optionally `overwrite` them."""
    dummy_files = create_dummy_files()
    # Some backends map to short names in utils/dummy_xxx_objects.py (e.g. torch -> pt).
    short_names = {'''torch''': '''pt'''}
    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, '''utils''')
    dummy_file_paths = {
        backend: os.path.join(path, F'''dummy_{short_names.get(backend, backend)}_objects.py''')
        for backend in dummy_files.keys()
    }
    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, '''r''', encoding='''utf-8''', newline='''\n''') as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F'''Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main '''
                    '''__init__ has new objects.''')
                with open(dummy_file_paths[backend], '''w''', encoding='''utf-8''', newline='''\n''') as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    '''The main __init__ has objects that are not present in '''
                    F'''diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` '''
                    '''to fix this.''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
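# CLI usage (illustrative):
#   python utils/check_dummies.py                      # raise if dummies are stale
#   python utils/check_dummies.py --fix_and_overwrite  # regenerate the dummy files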
| 6 | 0 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
a_ : List[Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each prompt `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors='''pt''')
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
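# Each yielded item (sketch): with n_copies=10, every HumanEval prompt is seen
# 10 times by the dataloader, e.g.
#   {"ids": <padded prompt token ids>, "task_id": 3, "input_len": tensor(57)}
# where "input_len" counts the non-padding tokens of that prompt.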
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated sequences contain an end-of-function string."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split('''(%s)''' % '''|'''.join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` completions per prompt copy and regroup them by task id."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch['''ids'''].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch['''ids'''][:, : batch['''input_len''']], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch['''task_id'''].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        '''do_sample''': args.do_sample,
        '''temperature''': args.temperature,
        '''max_new_tokens''': args.max_new_tokens,
        '''top_p''': args.top_p,
        '''top_k''': args.top_k,
        '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset('''openai_humaneval''')
    code_eval_metric = load_metric('''code_eval''')
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval['''test'''], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            '''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
            ''' flag to enable code evaluation.''')
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs, )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval['''test'''][task]['''test''']
            entry_point = F'''check({human_eval['test'][task]['entry_point']})'''
            references.append('''\n''' + test_func + '''\n''' + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers)
        print(F'''Results: {pass_at_k}''')
        # Save results to json file
        with open(args.output_file, '''w''') as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 357 |
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared
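# Worked chain (from the problem statement): 44 -> 32 -> 13 -> 10 -> 1, so
# next_number(44) == 4**2 + 4**2 == 32 and chain(44) resolves to True
# (the chain arrives at 1 rather than 89).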
# Every chain ends in one of two cycles:
# one ends at 89 (58 is seeded first because declaring it up front means
# the fewest iterations are needed to resolve all the members checked),
# and the other ends at 1 and contains only the element 1.
# So the chains of 58 and 1 are declared at the start.
# Changed dictionary to an array to speed up the solution
CHAINS: list[bool | None] = [None] * 1000_0000
CHAINS[0] = True
CHAINS[57] = False
def chain(number: int) -> bool:
    """Return True if `number`'s squared-digit chain arrives at 1, False for 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 1000_0000) -> int:
    """Count how many starting numbers below `number` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 6 | 0 |