"""Find the sum of all numbers that are equal to the sum of the factorials of
their digits (a Project Euler style problem)."""
from math import factorial

DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the digits of n."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    """Sum every number from 3 up to 7 * 9! that equals the sum of the
    factorials of its digits. 7 * 9! is a safe upper bound: an n-digit
    number's digit-factorial sum is at most n * 9!, which has fewer than n
    digits once n >= 8."""
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
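
# Quick sanity check, a minimal sketch (these values are well known for this
# puzzle, not taken from the code above): 145 = 1! + 4! + 5!, so
# sum_of_digit_factorial(145) == 145; the only other such number is 40585.
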
if __name__ == "__main__":
    print(f"{solution() = }")
import math


def is_prime(number: int) -> bool:
    """Check whether a number is prime using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1: the remaining
    # residues 6k, 6k + 2 and 6k + 4 are even, and 6k + 3 is divisible by 3.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
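
# Example (sketch): is_prime(29) only trial-divides by 5 and 7 (as i and
# i + 2), since int(sqrt(29) + 1) == 6 bounds the loop, and returns True.
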
def solution(nth: int = 10001) -> int:
    """Return the nth prime number (the 10001st prime by default)."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]


if __name__ == "__main__":
    print(f"{solution() = }")
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Record each vertex's parent in the breadth-first tree rooted at
        the source vertex."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source vertex to target_vertex
        as a "->"-joined string."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
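    # Expected behaviour (sketch, per the adjacency lists above): prints
    # "G->C->A->B->D", then "G", then raises ValueError since "Foo" is
    # unreachable.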
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
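
# Minimal usage sketch (assumed, not part of the original file):
# from transformers import VisualBertConfig, VisualBertModel
# configuration = VisualBertConfig(visual_embedding_dim=512)
# model = VisualBertModel(configuration)
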
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
from __future__ import annotations

from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Return the shortest distance from source to destination and the path
    taken, on a grid whose passable cells contain 1."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
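
# Example (sketch): on a 2x2 grid of all ones with diagonals allowed, the
# shortest route is a single diagonal step:
# dijkstra(np.array([[1, 1], [1, 1]]), (0, 0), (1, 1), allow_diagonal=True)
# -> (1.0, [(0, 0), (1, 1)])
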
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face["luke." + key] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
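
# Minimal usage sketch (assumed; this model family is deprecated in recent
# transformers releases, hence the `....` relative imports above):
# config = TrajectoryTransformerConfig(n_layer=4, n_head=4)
# from transformers import TrajectoryTransformerModel
# model = TrajectoryTransformerModel(config)
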
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
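#
# A minimal sketch of how the variations are expanded internally (mirrors the
# itertools.product call in main() below; the values are illustrative):
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   [" ".join(v).strip() for v in itertools.product(*dims)]
#   # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#   #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']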
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """A helper class to tee print's output into a file. Usage: sys.stdout = Tee(filename)"""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))


def get_original_command(max_width=80, full_python_path=False):
    """Return the original command line string, wrapped to `max_width` chars."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)


def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Flip `if 0` to `if 1` to debug everything but the run itself, using random
    # fake metrics instead of launching the (slow) benchmarked script.
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}


def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""


def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd",
        default=None,
        type=str,
        required=True,
        help="Base cmd",
    )
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase_ ):
_UpperCAmelCase =['''flax''']
def __init__( self: int , *a: Dict , **a: str) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: Union[str, Any] , *a: Optional[int] , **a: Any) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: List[str] , *a: Optional[int] , **a: Union[str, Any]) ->str:
'''simple docstring'''
requires_backends(cls , ["flax"])
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase_ ):
_UpperCAmelCase =['''flax''']
def __init__( self: str , *a: Dict , **a: Any) ->Tuple:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: Tuple , *a: Optional[int] , **a: Tuple) ->Dict:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: Optional[int] , *a: Tuple , **a: Any) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["flax"])
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase_ ):
_UpperCAmelCase =['''flax''']
def __init__( self: Optional[Any] , *a: Any , **a: Dict) ->Any:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: Dict , *a: Any , **a: List[str]) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: int , *a: Dict , **a: List[Any]) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"])
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase_ ):
_UpperCAmelCase =['''flax''']
def __init__( self: Any , *a: Union[str, Any] , **a: str) ->Dict:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: Optional[int] , *a: Union[str, Any] , **a: Tuple) ->str:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: Union[str, Any] , *a: List[str] , **a: Any) ->int:
'''simple docstring'''
requires_backends(cls , ["flax"])
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase_ ):
_UpperCAmelCase =['''flax''']
def __init__( self: Union[str, Any] , *a: Any , **a: List[str]) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: str , *a: Optional[int] , **a: Union[str, Any]) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: str , *a: Optional[Any] , **a: int) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["flax"])
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase_ ):
_UpperCAmelCase =['''flax''']
def __init__( self: Dict , *a: Dict , **a: Union[str, Any]) ->List[str]:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: Optional[int] , *a: int , **a: Optional[int]) ->str:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: List[Any] , *a: str , **a: int) ->Dict:
'''simple docstring'''
requires_backends(cls , ["flax"])
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase_ ):
_UpperCAmelCase =['''flax''']
def __init__( self: List[str] , *a: Any , **a: List[str]) ->int:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: List[Any] , *a: Any , **a: Any) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: int , *a: Union[str, Any] , **a: Optional[int]) ->int:
'''simple docstring'''
requires_backends(cls , ["flax"])
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase_ ):
_UpperCAmelCase =['''flax''']
def __init__( self: Tuple , *a: str , **a: int) ->List[Any]:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: Optional[int] , *a: Any , **a: int) ->Dict:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: str , *a: str , **a: Dict) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"])
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase_ ):
_UpperCAmelCase =['''flax''']
def __init__( self: List[str] , *a: Optional[Any] , **a: Optional[int]) ->Tuple:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: Union[str, Any] , *a: List[str] , **a: List[Any]) ->int:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: List[str] , *a: int , **a: Any) ->Dict:
'''simple docstring'''
requires_backends(cls , ["flax"])
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase_ ):
_UpperCAmelCase =['''flax''']
def __init__( self: int , *a: Optional[int] , **a: Optional[int]) ->List[Any]:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: str , *a: Union[str, Any] , **a: Any) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: Dict , *a: Any , **a: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"])
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase_ ):
_UpperCAmelCase =['''flax''']
def __init__( self: Any , *a: Optional[int] , **a: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: str , *a: Dict , **a: Tuple) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: Any , *a: Optional[int] , **a: List[Any]) ->int:
'''simple docstring'''
requires_backends(cls , ["flax"])
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase_ ):
_UpperCAmelCase =['''flax''']
def __init__( self: int , *a: List[Any] , **a: Union[str, Any]) ->int:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: Optional[Any] , *a: Tuple , **a: Any) ->str:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: Dict , *a: Any , **a: Union[str, Any]) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"])
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase_ ):
_UpperCAmelCase =['''flax''']
def __init__( self: Dict , *a: Optional[Any] , **a: int) ->Dict:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: Union[str, Any] , *a: Union[str, Any] , **a: List[Any]) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def _lowerCAmelCase ( cls: Tuple , *a: Any , **a: Dict) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"])
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))
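
# Examples (sketch): "+94773283048" and "0718382399" both match (valid
# prefix, then 7 plus an allowed second digit, then 7 more digits), while a
# string without one of the recognised prefixes does not.
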
if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
def stooge_sort(arr):
    """Sort a list in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
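    # e.g. entering "2,4,5,3,1" prints [1, 2, 3, 4, 5]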
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
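# For example (illustrative): _re_intro_mapping matches a line such as
# "MODEL_MAPPING_NAMES = OrderedDict(" and _re_identifier extracts "albert"
# from an entry like '        ("albert", "AlbertConfig"),'.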
def sort_auto_mapping(fname, overwrite=False):
    """Sort the model mappings in `fname` alphabetically; fix the file in
    place when overwrite=True, otherwise return True if sorting is needed."""
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on top of a given learning rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: float = None,
    adam_global_clipnorm: float = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a learning rate schedule: a warmup phase followed by polynomial decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
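
# Minimal usage sketch (hyper-parameter values are assumptions, not from this file):
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=500, weight_decay_rate=0.01
# )
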
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied directly to the variables rather than through the gradients."""

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


class GradientAccumulator:
    """Accumulates gradients over multiple steps; call with gradients, read `.gradients`, then `reset()`."""

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    """
    Wrapper class for timm models to be used as backbones, keeping the same API
    as the other backbone models in the library.
    """

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        """Empty init weights function to ensure compatibility of the class in the library."""
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
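
# Minimal usage sketch (assumes torch and timm are installed; "resnet50" is
# just an illustrative timm model name):
# config = TimmBackboneConfig(backbone="resnet50", use_pretrained_backbone=False)
# backbone = TimmBackbone(config)
# outputs = backbone(torch.randn(1, 3, 224, 224))
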
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def __UpperCAmelCase (lowercase__ ,lowercase__=False ) -> Any:
'''simple docstring'''
a_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
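# Hedged spot check (the throwaway config object below is a test stand-in,
# not a real ViTMSNConfig): with base_model=True the "vit." prefix is
# stripped from every target key.
_cfg = type("Cfg", (), {"num_hidden_layers": 1})()
assert create_rename_keys(_cfg, base_model=True)[0] == (
    "module.blocks.0.norm1.weight",
    "encoder.layer.0.layernorm_before.weight",
)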
def read_in_q_k_v(state_dict ,config ,base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
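# Hedged illustration of the fused-QKV split above: a (3*H, H) projection
# matrix is cut into equal query, key and value blocks along dim 0 (H = 4
# here is an arbitrary toy size).
h = 4
qkv = torch.arange(3 * h * h , dtype=torch.float32).reshape(3 * h , h)
q, k, v = qkv[:h, :], qkv[h : 2 * h, :], qkv[-h:, :]
assert torch.equal(torch.cat([q, k, v] , dim=0) , qkv)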
def __UpperCAmelCase (state_dict ):
'''simple docstring'''
a_ = ["head.weight", "head.bias"]
for k in ignore_keys:
        state_dict.pop(k ,None )
def remove_projection_head(state_dict ):
'''simple docstring'''
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
        state_dict.pop(k ,None )
def rename_key(dct ,old ,new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
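# Hedged usage check for the helper above: the key moves, the value stays.
d = {"old": 1}
rename_key(d , "old" , "new")
assert d == {"new": 1}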
def convert_vit_msn_checkpoint(checkpoint_url ,pytorch_dump_folder_path ):
    '''simple docstring'''
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id ,filename ) ,"r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url ,map_location="cpu" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config ,base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    read_in_q_k_v(state_dict ,config ,base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url ,stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size ,image_mean=IMAGENET_DEFAULT_MEAN ,image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image ,return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]] )
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]] )
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] ,expected_slice ,atol=1e-4 )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
a_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
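# Hedged usage note (the script filename and output path below are
# hypothetical examples, not taken from this file):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small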
| 685 |
'''simple docstring'''
class OverFlowError( Exception ):
    pass
class UnderFlowError( Exception ):
    pass
class FixedPriorityQueue:
    def __init__(self ) -> None:
'''simple docstring'''
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue(self , priority: int , data: int) -> None:
'''simple docstring'''
try:
if len(self.queues[priority]) >= 1_00:
raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2")
    def dequeue(self ) -> int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0)
raise UnderFlowError("All queues are empty")
    def __str__(self ) -> str:
'''simple docstring'''
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__(self ) -> None:
'''simple docstring'''
        self.queue = []
    def enqueue(self , data: int) -> None:
'''simple docstring'''
if len(self.queue) == 1_00:
raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)
    def dequeue(self ) -> int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError("The queue is empty")
else:
            data = min(self.queue)
            self.queue.remove(data )
            return data
    def __str__(self ) -> str:
'''simple docstring'''
return str(self.queue)
def fixed_priority_queue() -> None:
'''simple docstring'''
    fpq = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
'''simple docstring'''
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
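# Hedged sanity note for the two demos above: each enqueues nine items but
# calls dequeue ten times, so the tenth dequeue raises UnderFlowError (and,
# under the __main__ guard, the fixed-priority demo stops the script before
# the element demo runs unless the exception is caught). Up to that point the
# fixed-priority queue yields 10, 100, 128, 70, 7, 64, 1, 5, 4 (priority 0
# first, FIFO within a priority) and the element queue would yield
# 1, 4, 5, 7, 10, 64, 70, 100, 128 (ascending values).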
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
    def __init__( self , num_of_nodes: int) -> None:
        '''simple docstring'''
        self.m_num_of_nodes = num_of_nodes
        self.m_edges = []
        self.m_component = {}
    def add_edge( self , u_node: int , v_node: int , weight: int) -> None:
        '''simple docstring'''
        self.m_edges.append([u_node, v_node, weight])
    def find_component( self , u_node: int) -> int:
        '''simple docstring'''
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component( self , u_node: int) -> None:
        '''simple docstring'''
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union( self , component_size: list[int] , u_node: int , v_node: int) -> None:
        '''simple docstring'''
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka( self) -> None:
        '''simple docstring'''
        component_size = []
        mst_weight = 0
        minimum_weight_edge = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component)
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""")
def __UpperCAmelCase () -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
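# Hedged usage sketch (the class name above is anonymized, so `Graph` below
# is just a local alias for it): on a triangle with edge weights 5, 1 and 3,
# Boruvka keeps the weight-1 and weight-3 edges for a total weight of 4.
Graph = SCREAMING_SNAKE_CASE__
g = Graph(3)
g.add_edge(0 , 1 , 5)
g.add_edge(1 , 2 , 1)
g.add_edge(0 , 2 , 3)
g.boruvka()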
| 685 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
'''simple docstring'''
    data = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data )
    return dataset
class SCREAMING_SNAKE_CASE__ ( TestCase ):
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85)
        self.assertEqual(len(duplicate_clusters[0]) , 2)
def _lowerCAmelCase ( self: Any) ->Dict:
'''simple docstring'''
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter) , 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"] , 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , True)
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase (stress ,tangential_force ,area ,) -> tuple[str, float]:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
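# Hedged spot check for the function above (kept under its anonymized name):
# with stress = 25, tangential_force = 0 and area = 200, the missing
# tangential force is 25 * 200 = 5000.
assert __UpperCAmelCase(25 , 0 , 200) == ("tangential_force", 5000)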
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( DonutImageProcessor ):
def __init__( self: List[Any] , *a: str , **a: Tuple) ->None:
'''simple docstring'''
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , a , )
super().__init__(*a , **a)
| 685 | 1 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs ) -> np.ndarray:
    '''simple docstring'''
    maxes = np.max(outputs ,axis=-1 ,keepdims=True )
    shifted_exp = np.exp(outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 ,keepdims=True )
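# Hedged numeric check for softmax above: subtracting the row max keeps
# np.exp from overflowing while leaving the normalized result unchanged.
assert np.allclose(softmax(np.array([[1000.0, 1001.0, 1002.0]])) ,
                   softmax(np.array([[0.0, 1.0, 2.0]])))
assert np.allclose(softmax(np.array([[0.0, 1.0, 2.0]])).sum(axis=-1) , 1.0)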
class SCREAMING_SNAKE_CASE__ ( Pipeline ):
    def _sanitize_parameters( self , **kwargs):
        '''simple docstring'''
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}
    def preprocess( self , text , second_text=None):
        '''simple docstring'''
        return self.tokenizer(text , text_pair=second_text , return_tensors=self.framework)
    def _forward( self , model_inputs):
        '''simple docstring'''
        return self.model(**model_inputs)
    def postprocess( self , model_outputs):
        '''simple docstring'''
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 685 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a_ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Any , a: Path , a: Union[str, None] = None , a: Union[List[str], None] = None , a: Union[str, List[str], None] = None , a: bool = True , ) ->Optional[Any]:
'''simple docstring'''
a_ = [file for file in os.listdir(a) if os.path.isfile(os.path.join(a , a))]
if identifier is not None:
a_ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(a , a):
for n_ in n_identifier:
a_ = [file for file in files if n_ not in file]
else:
a_ = [file for file in files if n_identifier not in file]
a_ = ignore_files or []
ignore_files.append("__init__.py")
a_ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , a)
if only_modules:
a_ = file.split(".")[0]
try:
a_ = getattr(a , a)
a_ = doctest.DocTestSuite(a)
a_ = unittest.TextTestRunner().run(a)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""")
else:
a_ = doctest.testfile(str(".." / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "modeling"
a_ = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(a , identifier=a , ignore_files=a)
def _lowerCAmelCase ( self: int) ->Dict:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "tokenization"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "configuration"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: Union[str, Any]) ->Any:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = ["configuration", "modeling", "tokenization"]
self.analyze_directory(a , n_identifier=a)
def _lowerCAmelCase ( self: Optional[int]) ->Tuple:
'''simple docstring'''
a_ = Path("docs/source")
a_ = ["favicon.ico"]
self.analyze_directory(a , ignore_files=a , only_modules=a)
| 685 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
def __init__( self: Optional[Any] , a: int , a: int=7 , a: Union[str, Any]=3 , a: Tuple=18 , a: str=30 , a: Dict=4_00 , a: str=True , a: Optional[Any]=None , a: Dict=True , a: Optional[int]=None , a: List[str]=True , a: Dict=[0.5, 0.5, 0.5] , a: Union[str, Any]=[0.5, 0.5, 0.5] , ) ->List[str]:
'''simple docstring'''
a_ = size if size is not None else {"shortest_edge": 18}
a_ = crop_size if crop_size is not None else {"height": 18, "width": 18}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = image_size
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = do_center_crop
a_ = crop_size
a_ = do_normalize
a_ = image_mean
a_ = image_std
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
_UpperCAmelCase =LevitImageProcessor if is_vision_available() else None
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
a_ = LevitImageProcessingTester(self)
@property
def _lowerCAmelCase ( self: Tuple) ->Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(a , "image_mean"))
self.assertTrue(hasattr(a , "image_std"))
self.assertTrue(hasattr(a , "do_normalize"))
self.assertTrue(hasattr(a , "do_resize"))
self.assertTrue(hasattr(a , "do_center_crop"))
self.assertTrue(hasattr(a , "size"))
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
a_ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"shortest_edge": 18})
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18})
a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84})
def _lowerCAmelCase ( self: Tuple) ->List[str]:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
a_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a)
for image in image_inputs:
self.assertIsInstance(a , Image.Image)
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a_ = image_processing(a , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
a_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a)
for image in image_inputs:
self.assertIsInstance(a , np.ndarray)
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a_ = image_processing(a , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _lowerCAmelCase ( self: Tuple) ->Union[str, Any]:
'''simple docstring'''
a_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a)
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor)
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
a_ = image_processing(a , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 685 |
'''simple docstring'''
def solution(n = 100 ) -> int:
    '''simple docstring'''
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
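# Hedged spot check: for n = 10 the sum of squares is 385 and the squared
# sum is 3025, so the difference is 3025 - 385 = 2640.
assert solution(10) == 2640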
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 1 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x ) -> int:
    '''simple docstring'''
    return x + 2
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
a_ = "x = 3"
a_ = {}
a_ = evaluate(a , {} , state=a)
assert result == 3
self.assertDictEqual(a , {"x": 3})
a_ = "x = y"
a_ = {"y": 5}
a_ = evaluate(a , {} , state=a)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(a , {"x": 5, "y": 5})
def _lowerCAmelCase ( self: Optional[Any]) ->int:
'''simple docstring'''
a_ = "y = add_two(x)"
a_ = {"x": 3}
a_ = evaluate(a , {"add_two": add_two} , state=a)
assert result == 5
self.assertDictEqual(a , {"x": 3, "y": 5})
# Won't work without the tool
with CaptureStdout() as out:
a_ = evaluate(a , {} , state=a)
assert result is None
assert "tried to execute add_two" in out.out
def _lowerCAmelCase ( self: Tuple) ->Any:
'''simple docstring'''
a_ = "x = 3"
a_ = {}
a_ = evaluate(a , {} , state=a)
assert result == 3
self.assertDictEqual(a , {"x": 3})
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
a_ = "test_dict = {'x': x, 'y': add_two(x)}"
a_ = {"x": 3}
a_ = evaluate(a , {"add_two": add_two} , state=a)
self.assertDictEqual(a , {"x": 3, "y": 5})
self.assertDictEqual(a , {"x": 3, "test_dict": {"x": 3, "y": 5}})
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
a_ = "x = 3\ny = 5"
a_ = {}
a_ = evaluate(a , {} , state=a)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(a , {"x": 3, "y": 5})
def _lowerCAmelCase ( self: List[str]) ->List[Any]:
'''simple docstring'''
a_ = "text = f'This is x: {x}.'"
a_ = {"x": 3}
a_ = evaluate(a , {} , state=a)
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(a , {"x": 3, "text": "This is x: 3."})
def _lowerCAmelCase ( self: str) ->Any:
'''simple docstring'''
a_ = "if x <= 3:\n y = 2\nelse:\n y = 5"
a_ = {"x": 3}
a_ = evaluate(a , {} , state=a)
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(a , {"x": 3, "y": 2})
a_ = {"x": 8}
a_ = evaluate(a , {} , state=a)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(a , {"x": 8, "y": 5})
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ = "test_list = [x, add_two(x)]"
a_ = {"x": 3}
a_ = evaluate(a , {"add_two": add_two} , state=a)
self.assertListEqual(a , [3, 5])
self.assertDictEqual(a , {"x": 3, "test_list": [3, 5]})
def _lowerCAmelCase ( self: Optional[int]) ->Any:
'''simple docstring'''
a_ = "y = x"
a_ = {"x": 3}
a_ = evaluate(a , {} , state=a)
assert result == 3
self.assertDictEqual(a , {"x": 3, "y": 3})
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
a_ = "test_list = [x, add_two(x)]\ntest_list[1]"
a_ = {"x": 3}
a_ = evaluate(a , {"add_two": add_two} , state=a)
assert result == 5
self.assertDictEqual(a , {"x": 3, "test_list": [3, 5]})
a_ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
a_ = {"x": 3}
a_ = evaluate(a , {"add_two": add_two} , state=a)
assert result == 5
self.assertDictEqual(a , {"x": 3, "test_dict": {"x": 3, "y": 5}})
def _lowerCAmelCase ( self: Tuple) ->Optional[int]:
'''simple docstring'''
a_ = "x = 0\nfor i in range(3):\n x = i"
a_ = {}
a_ = evaluate(a , {"range": range} , state=a)
assert result == 2
self.assertDictEqual(a , {"x": 2, "i": 2})
| 685 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( SchedulerCommonTest ):
_UpperCAmelCase =(PNDMScheduler,)
_UpperCAmelCase =(('''num_inference_steps''', 50),)
def _lowerCAmelCase ( self: int , **a: Optional[int]) ->Any:
'''simple docstring'''
a_ = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a)
return config
def _lowerCAmelCase ( self: Any , a: Tuple=0 , **a: Any) ->Any:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
new_scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: str) ->Any:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Union[str, Any] , a: str=0 , **a: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
# copy over dummy past residuals
new_scheduler.set_timesteps(a)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: Dict , **a: int) ->Any:
'''simple docstring'''
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(a)
for i, t in enumerate(scheduler.prk_timesteps):
a_ = model(a , a)
a_ = scheduler.step_prk(a , a , a).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a_ = model(a , a)
a_ = scheduler.step_plms(a , a , a).prev_sample
return sample
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(a , "set_timesteps"):
scheduler.set_timesteps(a)
elif num_inference_steps is not None and not hasattr(a , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , 0 , a , **a).prev_sample
a_ = scheduler.step_prk(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ = scheduler.step_plms(a , 0 , a , **a).prev_sample
a_ = scheduler.step_plms(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(steps_offset=1)
a_ = scheduler_class(**a)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1]) , )
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=a , beta_end=a)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=a)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=a)
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = 27
for scheduler_class in self.scheduler_classes:
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a_ = scheduler.step_prk(a , a , a).prev_sample
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
with self.assertRaises(a):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.full_loop()
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = self.full_loop(prediction_type="v_prediction")
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def downscale_height_and_width(height ,width ,scale_factor=8 ):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
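# Hedged spot check for the helper above (kept as comments so importing this
# module stays side-effect free): with the default scale_factor of 8,
# downscale_height_and_width(768, 768) -> (96, 96) since 768 // 8**2 = 12 and
# 12 * 8 = 96, while downscale_height_and_width(100, 100) -> (16, 16) because
# the nonzero remainder bumps 100 // 64 = 1 up to 2 before re-scaling.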
class SCREAMING_SNAKE_CASE__ ( DiffusionPipeline ):
def __init__( self: List[str] , a: UNetaDConditionModel , a: DDPMScheduler , a: VQModel , ) ->Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
unet=a , scheduler=a , movq=a , )
a_ = 2 ** (len(self.movq.config.block_out_channels) - 1)
def _lowerCAmelCase ( self: Union[str, Any] , a: str , a: Dict , a: List[str] , a: Optional[Any] , a: Union[str, Any] , a: List[Any]) ->int:
'''simple docstring'''
if latents is None:
a_ = randn_tensor(a , generator=a , device=a , dtype=a)
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
a_ = latents.to(a)
a_ = latents * scheduler.init_noise_sigma
return latents
def _lowerCAmelCase ( self: List[Any] , a: int=0) ->List[str]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`")
a_ = torch.device(f"""cuda:{gpu_id}""")
a_ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a)
def _lowerCAmelCase ( self: Optional[int] , a: Dict=0) ->Optional[Any]:
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0"):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
a_ = torch.device(f"""cuda:{gpu_id}""")
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
a_ = None
for cpu_offloaded_model in [self.unet, self.movq]:
a_ , a_ = cpu_offload_with_hook(a , a , prev_module_hook=a)
# We'll offload the last model manually.
a_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook"):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook")
and hasattr(module._hf_hook , "execution_device")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(a)
def __call__( self: Optional[int] , a: Union[torch.FloatTensor, List[torch.FloatTensor]] , a: Union[torch.FloatTensor, List[torch.FloatTensor]] , a: int = 5_12 , a: int = 5_12 , a: int = 1_00 , a: float = 4.0 , a: int = 1 , a: Optional[Union[torch.Generator, List[torch.Generator]]] = None , a: Optional[torch.FloatTensor] = None , a: Optional[str] = "pil" , a: bool = True , ) ->Union[str, Any]:
'''simple docstring'''
a_ = self._execution_device
a_ = guidance_scale > 1.0
if isinstance(a , a):
a_ = torch.cat(a , dim=0)
a_ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(a , a):
a_ = torch.cat(a , dim=0)
if do_classifier_free_guidance:
a_ = image_embeds.repeat_interleave(a , dim=0)
a_ = negative_image_embeds.repeat_interleave(a , dim=0)
a_ = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=a)
self.scheduler.set_timesteps(a , device=a)
a_ = self.scheduler.timesteps
a_ = self.unet.config.in_channels
a_ , a_ = downscale_height_and_width(a , a , self.movq_scale_factor)
# create initial latent
a_ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , a , a , a , self.scheduler , )
for i, t in enumerate(self.progress_bar(a)):
# expand the latents if we are doing classifier free guidance
a_ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
a_ = {"image_embeds": image_embeds}
a_ = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
a_ , a_ = noise_pred.split(latents.shape[1] , dim=1)
a_ , a_ = noise_pred.chunk(2)
a_ , a_ = variance_pred.chunk(2)
a_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
a_ = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , "variance_type")
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
a_ , a_ = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
a_ = self.scheduler.step(
a , a , a , generator=a , )[0]
# post-processing
a_ = self.movq.decode(a , force_not_quantize=a)["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")
if output_type in ["np", "pil"]:
a_ = image * 0.5 + 0.5
a_ = image.clamp(0 , 1)
a_ = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
a_ = self.numpy_to_pil(a)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a)
| 685 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Optional[int]) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "bird"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
a_ = pipe.prepare_image_inputs([canny_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "Chef in the kitchen"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
a_ = pipe.prepare_image_inputs([pose_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester( unittest.TestCase ):
def __init__( self: Union[str, Any] , a: Union[str, Any] , a: Optional[Any]=7 , a: Union[str, Any]=3 , a: Union[str, Any]=18 , a: List[str]=30 , a: List[Any]=4_00 , a: str=True , a: Optional[int]=None , a: Optional[int]=True , ) ->Dict:
'''simple docstring'''
a_ = size if size is not None else {"height": 18, "width": 18}
a_ = parent
a_ = batch_size
a_ = num_channels
a_ = image_size
a_ = min_resolution
a_ = max_resolution
a_ = do_resize
a_ = size
a_ = apply_ocr
def _lowerCAmelCase ( self: Optional[int]) ->str:
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class SCREAMING_SNAKE_CASE__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
_UpperCAmelCase =LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _lowerCAmelCase ( self: List[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = LayoutLMvaImageProcessingTester(self)
@property
def _lowerCAmelCase ( self: int) ->str:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCAmelCase ( self: List[str]) ->Tuple:
'''simple docstring'''
a_ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(a , "do_resize"))
self.assertTrue(hasattr(a , "size"))
self.assertTrue(hasattr(a , "apply_ocr"))
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"height": 18, "width": 18})
a_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {"height": 42, "width": 42})
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: int) ->List[str]:
'''simple docstring'''
a_ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a)
for image in image_inputs:
self.assertIsInstance(a , Image.Image)
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors="pt")
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , a)
self.assertIsInstance(encoding.boxes , a)
# Test batched
a_ = image_processing(a , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
a_ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a)
for image in image_inputs:
self.assertIsInstance(a , np.ndarray)
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
a_ = image_processing(a , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _lowerCAmelCase ( self: List[Any]) ->List[str]:
'''simple docstring'''
a_ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a)
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor)
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
a_ = image_processing(a , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def _lowerCAmelCase ( self: Dict) ->Optional[int]:
'''simple docstring'''
a_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
a_ = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test")
a_ = Image.open(ds[0]["file"]).convert("RGB")
a_ = image_processing(a , return_tensors="pt")
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24))
self.assertEqual(len(encoding.words) , len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
a_ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
a_ = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 
6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , a)
self.assertListEqual(encoding.boxes , a)
# with apply_OCR = False
a_ = LayoutLMvaImageProcessor(apply_ocr=a)
a_ = image_processing(a , return_tensors="pt")
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24))
| 685 |
'''simple docstring'''
def solution(limit = 1000 ) -> int:
    '''simple docstring'''
    return sum(e for e in range(3 ,limit ) if e % 3 == 0 or e % 5 == 0 )
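# Hedged spot check: below 10 the multiples of 3 or 5 are 3, 5, 6 and 9,
# which sum to 23.
assert solution(10) == 23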
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
a_ = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00])
a_ = get_activation("gelu")
self.assertTrue(torch.allclose(gelu_python(a) , torch_builtin(a)))
self.assertFalse(torch.allclose(gelu_python(a) , gelu_new(a)))
def _lowerCAmelCase ( self: Optional[int]) ->Dict:
'''simple docstring'''
a_ = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00])
a_ = get_activation("gelu")
a_ = get_activation("gelu_10")
a_ = torch_builtin(a)
a_ = geluaa(a)
a_ = torch.where(y_gelu_aa < 10.0 , 1 , 0)
self.assertTrue(torch.max(a).item() == 10.0)
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask))
def _lowerCAmelCase ( self: int) ->str:
'''simple docstring'''
get_activation("gelu")
get_activation("gelu_10")
get_activation("gelu_fast")
get_activation("gelu_new")
get_activation("gelu_python")
get_activation("gelu_pytorch_tanh")
get_activation("linear")
get_activation("mish")
get_activation("quick_gelu")
get_activation("relu")
get_activation("sigmoid")
get_activation("silu")
get_activation("swish")
get_activation("tanh")
with self.assertRaises(a):
get_activation("bogus")
with self.assertRaises(a):
get_activation(a)
def _lowerCAmelCase ( self: Dict) ->Dict:
'''simple docstring'''
        acta = get_activation("gelu")
        acta.a = 1
        acta_new = get_activation("gelu")
        self.assertEqual(acta.a , 1)
        with self.assertRaises(AttributeError):
            _ = acta_new.a
| 685 |
'''simple docstring'''
import math
def prime_sieve(lowercase__ ) -> list:
    '''simple docstring'''
    is_prime = [True] * lowercase__
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 ,int(lowercase__**0.5 + 1 ) ,2 ):
        index = i * 2
        while index < lowercase__:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 ,lowercase__ ,2 ):
        if is_prime[i]:
            primes.append(i )
return primes
def solution(lowercase__ = 999966663333 ) -> int:
'''simple docstring'''
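    # For each pair of consecutive primes (last_prime, next_prime), the loop sums the
    # qualifying multiples between last_prime**2 and next_prime**2 that stay within the
    # limit, then subtracts the values that both primes counted.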
    primes_upper_bound = math.floor(math.sqrt(lowercase__ ) ) + 100
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= lowercase__:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= lowercase__:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > lowercase__:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= lowercase__:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > lowercase__:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 685 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_small_fast'] = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot_small'] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot_small'] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot_small'] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def convert_classification(base_model_name ,hf_config ,downstream_dict ):
    '''simple docstring'''
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name ,config=hf_config )
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name ,hf_config ,downstream_dict ):
    '''simple docstring'''
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name ,config=hf_config )
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name ,hf_config ,downstream_dict ):
    '''simple docstring'''
    model = UniSpeechSatForXVector.from_pretrained(base_model_name ,config=hf_config )
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name ,config_path ,checkpoint_path ,model_dump_path ):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path ,map_location="cpu" )
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path )
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name ,return_attention_mask=True ,do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        hf_model = convert_classification(base_model_name ,hf_config ,downstream_dict )
    elif arch.endswith("ForAudioFrameClassification" ):
        hf_model = convert_diarization(base_model_name ,hf_config ,downstream_dict )
    elif arch.endswith("ForXVector" ):
        hf_model = convert_xvector(base_model_name ,hf_config ,downstream_dict )
    else:
        raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 685 | 1 |
'''simple docstring'''
import re
def is_sri_lankan_phone_number(lowercase__ ) -> bool:
'''simple docstring'''
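    # Prefix 0 / 94 / +94 / 0094, a valid operator code 7X, an optional space or hyphen, then seven digits.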
    pattern = re.compile(
r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(re.search(pattern ,lowercase__ ) )
if __name__ == "__main__":
    phone = '0094702343221'
print(is_sri_lankan_phone_number(phone))
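    # A couple of extra sanity checks (illustrative values only):
    print(is_sri_lankan_phone_number('+94773283048')) # True
    print(is_sri_lankan_phone_number('12345')) # False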
| 685 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 685 | 1 |
'''simple docstring'''
from math import loga
def __UpperCAmelCase (lowercase__ ) -> int:
'''simple docstring'''
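    # lowercase__ & -lowercase__ isolates the lowest set bit; log2 of that power of two is the bit's index.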
    if lowercase__ < 0:
        raise ValueError("Input value must be a positive integer" )
    elif not isinstance(lowercase__ ,int ):
        raise TypeError("Input value must be a 'int' type" )
    return 0 if (lowercase__ == 0) else int(loga(lowercase__ & -lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def load_vocab_and_emoji(vocab_file ,emoji_file ) -> Tuple:
'''simple docstring'''
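    # Each vocab line is one comma-separated group of surface forms; every form in a group maps to the same token id.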
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = json.loads(f.read() )
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.readlines()
a_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ = b
a_ = idx
for wd in b:
a_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase =['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return len(self.raw_vocab)
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any) ->Dict:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def _lowerCAmelCase ( self: int , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a)
def _lowerCAmelCase ( self: Optional[int] , a: Any) ->str:
'''simple docstring'''
a_ = "".join(a).strip()
return out_string
def _lowerCAmelCase ( self: Any , a: "Conversation") ->List[int]:
'''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
return input_ids
    def _lowerCAmelCase ( self , save_directory , filename_prefix: Optional[str] = None) ->Tuple[str]:
        '''simple docstring'''
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            emoji_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file , "w" , encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file , "w" , encoding="utf-8") as writer:
            json.dump(self.emoji , writer)
        return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
    def __init__( self , vocab , ids_to_tokens , emoji):
        '''simple docstring'''
        self.vocab = vocab # same as swe
        self.ids_to_tokens = ids_to_tokens # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
a_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter5 = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter6 = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Dict) ->Any:
'''simple docstring'''
return len(self.ids_to_tokens)
    def _lowerCAmelCase ( self , content):
'''simple docstring'''
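        # Mask URLs, emails, phone numbers, dates and prices with placeholder tokens, then collapse runs of box-drawing characters into <BLOCK>.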
a_ = self.content_repattera.sub("<URL>" , a)
a_ = self.content_repattera.sub("<EMAIL>" , a)
a_ = self.content_repattera.sub("<TEL>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<PRICE>" , a)
a_ = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
    def _lowerCAmelCase ( self , text , clean=False):
'''simple docstring'''
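        # Normalize spaces, newlines and tabs to sentinel tokens, then greedily take the longest vocabulary match at each position.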
a_ = text.replace(" " , "<SP>")
a_ = text.replace(" " , "<SP>")
a_ = text.replace("\r\n" , "<BR>")
a_ = text.replace("\n" , "<BR>")
a_ = text.replace("\r" , "<BR>")
a_ = text.replace("\t" , "<TAB>")
a_ = text.replace("—" , "ー")
a_ = text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ = text.replace(a , a)
if clean:
a_ = self.clean_text(a)
def check_simbol(a: Dict):
a_ = x.encode()
if len(a) == 1 and len(a) == 2:
a_ = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC_2_A_1 and c <= 0XC_2_B_F)
or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3)
or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F)
or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2)
):
return True
return False
def checkuae(a: str):
a_ = x.encode()
if len(a) == 1 and len(a) == 3:
a_ = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F:
return True
return False
a_ = 0
a_ = []
while pos < len(a):
a_ = min(len(a) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ = [] # (token_id, token, pos)
for e in range(a , a , -1):
a_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a) > 2:
a_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(a) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ = sorted(a , key=lambda a: x[0])[0]
result.append(a)
a_ = e
else:
a_ = pos + 1
a_ = text[pos:end]
if check_simbol(a):
result.append("<KIGOU>")
elif checkuae(a):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ = end
return result
def _lowerCAmelCase ( self: int , a: List[Any] , a: Any="\n") ->str:
'''simple docstring'''
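        # Consecutive <|byteNN|> tokens are buffered and decoded as UTF-8 as soon as a non-byte token appears.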
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8" , errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8" , errors="replace"))
        text = "".join(words)
        return text
| 685 | 1 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class SCREAMING_SNAKE_CASE__ ( tr.AbstractTransform ):
def __init__( self: Optional[Any] , a: str = " ") ->int:
'''simple docstring'''
        self.sentence_delimiter = sentence_delimiter
def _lowerCAmelCase ( self: Any , a: str) ->List[Any]:
'''simple docstring'''
return list(a)
def _lowerCAmelCase ( self: Tuple , a: List[str]) ->Tuple:
'''simple docstring'''
        chars = []
        for sent_idx, sentence in enumerate(a):
            chars.extend(self.process_string(sentence))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(a) - 1:
chars.append(self.sentence_delimiter)
return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcribtions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def _lowerCAmelCase ( self: str) ->Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
    def _lowerCAmelCase ( self , predictions , references , concatenate_texts=False):
'''simple docstring'''
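        # With concatenate_texts, jiwer scores the whole corpus in a single call; otherwise per-pair error counts are accumulated.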
        if concatenate_texts:
            return jiwer.compute_measures(
                references , predictions , truth_transform=cer_transform , hypothesis_transform=cer_transform , )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions , references):
            measures = jiwer.compute_measures(
                reference , prediction , truth_transform=cer_transform , hypothesis_transform=cer_transform , )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 685 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Optional[Any] , a: Any , a: Dict , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , ) ->int:
'''simple docstring'''
a_ = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(a , start_positions=a , end_positions=a)
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Union[str, Any] , a: Any , a: Tuple , a: Union[str, Any] , a: int , a: int , a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str , a: List[str] , a: Dict , a: Tuple , a: Optional[Any] , a: Any , a: Any , a: str , a: str , a: Optional[Any] , ) ->List[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: List[Any] , a: Dict , a: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[str] , a: Tuple , ) ->Dict:
'''simple docstring'''
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Any , a: List[str] , a: Union[str, Any] , a: int) ->int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: List[Any] , a: Tuple=False) ->List[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37)
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt"))
a_ = torch.jit.load(os.path.join(a , "traced_model.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
a_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
a_ = model(a)[0]
a_ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4))
| 685 | 1 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
def get_diffusers_versions():
'''simple docstring'''
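    # Query PyPI for every published diffusers release so a community pipeline can be pinned to a matching git tag.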
a_ = "https://pypi.org/pypi/diffusers/json"
a_ = json.loads(request.urlopen(lowercase__ ).read() )["releases"].keys()
return sorted(lowercase__ ,key=lambda lowercase__ : version.Version(lowercase__ ) )
def init_hf_modules():
    '''simple docstring'''
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE )
    os.makedirs(HF_MODULES_CACHE ,exist_ok=True )
    init_path = Path(HF_MODULES_CACHE ) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name ):
    '''simple docstring'''
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(dynamic_module_path ,exist_ok=True )
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file ):
    '''simple docstring'''
    with open(module_file ,"r" ,encoding="utf-8" ) as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall("^\s*import\s+\.(\S+)\s*$" ,content ,flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" ,content ,flags=re.MULTILINE )
    # Unique-ify
    return list(set(relative_imports ) )
def get_relative_import_files(module_file ):
    '''simple docstring'''
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f ) )
        module_path = Path(module_file ).parent
        new_import_files = [str(module_path / m ) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [F"""{f}.py""" for f in new_import_files]
        no_change = len(new_import_files ) == 0
        all_relative_imports.extend(files_to_check )
    return all_relative_imports
def check_imports(filename ):
    '''simple docstring'''
    with open(filename ,"r" ,encoding="utf-8" ) as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall("^\s*import\s+(\S+)\s*$" ,content ,flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall("^\s*from\s+(\S+)\s+import" ,content ,flags=re.MULTILINE )
    # Only keep the top-level module
    imports = [imp.split("." )[0] for imp in imports if not imp.startswith("." )]
    # Unique-ify and test we got them all
    imports = list(set(imports ) )
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp )
        except ImportError:
            missing_packages.append(imp )
    if len(missing_packages ) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            F"""{', '.join(missing_packages )}. Run `pip install {' '.join(missing_packages )}`""" )
    return get_relative_imports(filename )
def get_class_in_module(class_name ,module_path ):
    '''simple docstring'''
    module_path = module_path.replace(os.path.sep ,"." )
    module = importlib.import_module(module_path )
    if class_name is None:
        return find_pipeline_class(module )
    return getattr(module ,class_name )
def find_pipeline_class(loaded_module ):
    '''simple docstring'''
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module ,inspect.isclass ) )
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls ,DiffusionPipeline )
            and cls.__module__.split("." )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
                    F""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
                    F""" {loaded_module}.""" )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(pretrained_model_name_or_path ,module_file ,cache_dir = None ,force_download = False ,resume_download = False ,proxies = None ,use_auth_token = None ,revision = None ,local_files_only = False ,):
    '''simple docstring'''
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    module_file_or_url = os.path.join(pretrained_model_name_or_path ,module_file )
    if os.path.isfile(module_file_or_url ):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/" ) == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split("." )[:3] )
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(F"""Defaulting to latest_version: {revision}.""" )
        elif revision in available_versions:
            revision = F"""v{revision}"""
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                F"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
                F""" {', '.join(available_versions + ['main'] )}.""" )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision ,pipeline=pretrained_model_name_or_path )
        try:
            resolved_module_file = cached_download(
                github_url ,cache_dir=cache_dir ,force_download=force_download ,proxies=proxies ,resume_download=resume_download ,local_files_only=local_files_only ,use_auth_token=False ,)
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path ,module_file ,cache_dir=cache_dir ,force_download=force_download ,proxies=proxies ,resume_download=resume_download ,local_files_only=local_files_only ,use_auth_token=use_auth_token ,)
            submodule = os.path.join("local" ,"--".join(pretrained_model_name_or_path.split("/" ) ) )
        except EnvironmentError:
            logger.error(F"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file )
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule )
    submodule_path = Path(HF_MODULES_CACHE ) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file ,submodule_path / module_file )
        for module_needed in modules_needed:
            module_needed = F"""{module_needed}.py"""
            shutil.copy(os.path.join(pretrained_model_name_or_path ,module_needed ) ,submodule_path / module_needed )
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token ,str ):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path ,revision=revision ,token=token ).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule )
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file ,submodule_path / module_file )
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path ,F"""{module_needed}.py""" ,cache_dir=cache_dir ,force_download=force_download ,resume_download=resume_download ,proxies=proxies ,use_auth_token=use_auth_token ,revision=revision ,local_files_only=local_files_only ,)
    return os.path.join(full_submodule ,module_file )
def get_class_from_dynamic_module(pretrained_model_name_or_path ,module_file ,class_name = None ,cache_dir = None ,force_download = False ,resume_download = False ,proxies = None ,use_auth_token = None ,revision = None ,local_files_only = False ,**kwargs ,):
    '''simple docstring'''
    final_module = get_cached_module_file(
        pretrained_model_name_or_path ,module_file ,cache_dir=cache_dir ,force_download=force_download ,resume_download=resume_download ,proxies=proxies ,use_auth_token=use_auth_token ,revision=revision ,local_files_only=local_files_only ,)
    return get_class_in_module(class_name ,final_module.replace(".py" ,"" ) )
| 685 |
'''simple docstring'''
import math
def is_prime(number ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 ,int(math.sqrt(number ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution(lowercase__ = 10001 ) -> int:
'''simple docstring'''
    try:
        nth = int(lowercase__ )
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int." ) from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one." )
    primes = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 1 |
'''simple docstring'''
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
_UpperCAmelCase ='''nezha'''
    def __init__( self , vocab_size=2_11_28 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 685 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''visual_bert'''
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , visual_embedding_dim=5_12 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , bypass_transformer=False , special_visual_initialize=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 685 | 1 |
'''simple docstring'''
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
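# Project Euler problem 8: find the greatest product of 13 adjacent digits in the 1000-digit number above.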
def solution(n = N ) -> int:
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x ,y : str(int(x ) * int(y ) ) ,n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(F'{solution() = }')
| 685 |
'''simple docstring'''
from __future__ import annotations
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase (grid ,source ,destination ,allow_diagonal ,) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
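    # All grid edges cost 1, so this Dijkstra behaves like BFS: the heap orders open cells by distance from the source.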
    rows , cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue , visited = [(0, source)], set()
    matrix = np.full((rows, cols) ,np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) ,dtype=object )
    predecessors[source] = None
    while queue:
        (dist , (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x , y = predecessors[x, y]
            path.append(source ) # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx , ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue ,(dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 1 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =(CMStochasticIterativeScheduler,)
_UpperCAmelCase =10
def _lowerCAmelCase ( self: List[str] , **a: Optional[int]) ->List[Any]:
'''simple docstring'''
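        # Defaults used throughout these tests: 201 training timesteps and the sigma bounds of the consistency-model setup.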
        config = {
"num_train_timesteps": 2_01,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
config.update(**a)
return config
def _lowerCAmelCase ( self: List[Any]) ->List[Any]:
'''simple docstring'''
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual , timestep_0 , sample).prev_sample
        output_1 = scheduler.step(residual , timestep_1 , sample).prev_sample
        self.assertEqual(output_0.shape , sample.shape)
        self.assertEqual(output_0.shape , output_1.shape)
def _lowerCAmelCase ( self: List[str]) ->str:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps)
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)
def _lowerCAmelCase ( self: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = 1
scheduler.set_timesteps(a)
a_ = scheduler.timesteps
a_ = torch.manual_seed(0)
a_ = self.dummy_model()
a_ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(a):
# 1. scale model input
a_ = scheduler.scale_model_input(a , a)
# 2. predict noise residual
a_ = model(a , a)
# 3. predict previous sample x_t-1
a_ = scheduler.step(a , a , a , generator=a).prev_sample
a_ = pred_prev_sample
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 192.7614) < 1e-2
assert abs(result_mean.item() - 0.2510) < 1e-3
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[int]:
'''simple docstring'''
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = [1_06, 0]
scheduler.set_timesteps(timesteps=a)
a_ = scheduler.timesteps
a_ = torch.manual_seed(0)
a_ = self.dummy_model()
a_ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
a_ = scheduler.scale_model_input(a , a)
# 2. predict noise residual
a_ = model(a , a)
# 3. predict previous sample x_t-1
a_ = scheduler.step(a , a , a , generator=a).prev_sample
a_ = pred_prev_sample
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 347.6357) < 1e-2
assert abs(result_mean.item() - 0.4527) < 1e-3
def _lowerCAmelCase ( self: int) ->List[Any]:
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError , msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
def _lowerCAmelCase ( self: List[str]) ->str:
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError , msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps)
def _lowerCAmelCase ( self: Dict) ->List[str]:
'''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
            scheduler.set_timesteps(timesteps=timesteps)
| 685 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
with open(lowercase__ ) as metadata_file:
a_ = json.load(lowercase__ )
a_ = LukeConfig(use_entity_aware_attention=lowercase__ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
a_ = torch.load(lowercase__ ,map_location="cpu" )["module"]
# Load the entity vocab file
a_ = load_original_entity_vocab(lowercase__ )
# add an entry for [MASK2]
a_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a_ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
a_ = AddedToken("<ent>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
a_ = AddedToken("<ent2>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
    print(f"""Saving tokenizer to {pytorch_dump_folder_path}""")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            new_state_dict["luke." + key] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"""Unexpected missing_keys: {missing_keys}""")
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"""
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            f""" {expected_shape}"""
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
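

# Hedged illustration (added; not from the original script): the embedding surgery
# above in miniature — new special-token rows are copied from existing rows rather
# than drawn at random, so fine-tuning starts from a sensible point.
def _extend_embedding_sketch():
    emb = torch.randn(10, 4)               # stand-in for word_embeddings.weight
    new_rows = emb[[3, 5]]                 # rows of the tokens we copy from ("@", "#")
    extended = torch.cat([emb, new_rows])  # vocab grows by 2, matching config.vocab_size += 2
    assert extended.shape == (12, 4)
    return extended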
def load_original_entity_vocab(entity_vocab_path):
    '''simple docstring'''
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"""{language}:{entity_name}"""] = entity_id
    return new_mapping
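

# Hedged illustration (added; not in the original script): the mapping built above
# keys special tokens bare and everything else as "<language>:<entity_name>".
def _entity_mapping_example() -> dict:
    entry = {"id": 7, "entities": [["Japan", "en"], ["日本", "ja"]]}
    mapping = {}
    for entity_name, language in entry["entities"]:
        mapping[f"{language}:{entity_name}"] = entry["id"]
    return mapping  # {"en:Japan": 7, "ja:日本": 7}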
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 685 | 1 |
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    '''simple docstring'''
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it, and load it from here, using --tokenizer_name")
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)
    model.resize_token_embeddings(len(tokenizer))
    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
            "--mlm flag (masked language modeling).")
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability)
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
# Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}
        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))
        results.update(result)
    return results
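

# Hedged aside (added; not part of the original script): the eval metric above is
# perplexity, i.e. exp of the mean cross-entropy loss reported by the Trainer.
def _perplexity_sketch(eval_loss: float = 2.0) -> float:
    return math.exp(eval_loss)  # loss 2.0 -> perplexity ~7.39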
def _mp_fn(index):
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 685 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self) -> None:
        '''simple docstring'''
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
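

# Hedged illustration (added): the "##" prefix asserted above is the WordPiece
# continuation marker; detokenizing a word strips it and joins the pieces.
def _wordpiece_join_sketch(tokens=("un", "##want", "##ed")) -> str:
    return tokens[0] + "".join(t[2:] for t in tokens[1:])  # -> "unwanted"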
| 685 | 1 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
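

# Hedged sketch (added): what the OrderedDict name mappings above buy you — a
# config's model_type string keys straight into an architecture class name.
def _mapping_lookup_sketch() -> str:
    example = OrderedDict([("bert", "FlaxBertModel"), ("gpt2", "FlaxGPT2Model")])
    return example["gpt2"]  # -> "FlaxGPT2Model"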
| 685 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
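

# Hedged sketch (added; simplified and hypothetical): the idea behind _LazyModule —
# attribute access triggers the real import, so importing the package stays cheap.
class _LazyModuleSketch:
    def __init__(self, name, import_structure):
        self._name = name
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        import importlib

        module = importlib.import_module(f"{self._name}.{self._attr_to_module[attr]}")
        return getattr(module, attr)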
| 685 | 1 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
    def test_canny(self) -> None:
        '''simple docstring'''
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params["controlnet"] = controlnet_params
        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)
        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)
        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078])
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self) -> None:
        '''simple docstring'''
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16)
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16)
        params["controlnet"] = controlnet_params
        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)
        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)
        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]])
        print(f"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
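

# Hedged aside (added): replicate/shard above follow the usual pmap data layout —
# params are copied per device and batches get a leading device axis.
def _shard_sketch():
    num_devices = jax.device_count()
    batch = jnp.zeros((num_devices * 4, 3))    # global batch
    return batch.reshape((num_devices, 4, 3))  # per-device leading axis, as shard() does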
| 685 |
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    '''simple docstring'''
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
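    # Hedged extra checks (added for illustration):
    assert is_sri_lankan_phone_number("0712345678")
    assert is_sri_lankan_phone_number("+94712345678")
    assert not is_sri_lankan_phone_number("0732345678")  # 3 is not a valid operator digit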
| 685 | 1 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    '''simple docstring'''
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
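

# Hedged sketch (added) of the predicate under test: "small" means the size is
# known and strictly below a non-zero in-memory cap.
def _is_small_dataset_sketch(dataset_size, in_memory_max_size) -> bool:
    return bool(dataset_size and in_memory_max_size and dataset_size < in_memory_max_size)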
| 685 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite: bool = False):
    '''simple docstring'''
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()
    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1
    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    '''simple docstring'''
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]
    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"""The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"""
            " this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
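

# Hedged mini-example (added): sorting mapping blocks by their first quoted
# identifier, the same idea _re_identifier serves above.
def _sort_blocks_sketch():
    blocks = ['    ("roberta", "RobertaModel"),', '    ("bert", "BertModel"),']
    return sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])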
| 685 | 1 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        '''simple docstring'''
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
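

# Hedged worked example (added): WER = (S + D + I) / (S + D + C). With one
# substitution, one insertion and three hits: (1 + 0 + 1) / (1 + 0 + 3) = 0.5.
def _wer_by_hand(substitutions=1, deletions=0, insertions=1, hits=3) -> float:
    return (substitutions + deletions + insertions) / (substitutions + deletions + hits)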
| 685 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs) -> None:
        '''simple docstring'''
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        '''simple docstring'''
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module) -> None:
        '''simple docstring'''
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
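

# Hedged illustration (added): the feature_info bookkeeping above in miniature —
# each timm stage's module name maps to its stage index, stored as a string.
def _feature_map_sketch() -> dict:
    feature_info = [{"module": "layer1"}, {"module": "layer2"}]
    return {layer["module"]: str(i) for i, layer in enumerate(feature_info)}  # {"layer1": "0", "layer2": "1"}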
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        '''simple docstring'''
        return self[-1] < other[-1]

    def __eq__(self, other):
        '''simple docstring'''
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    '''simple docstring'''
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
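

# Hedged quick check (added): patience sort is a comparison sort, so the result
# matches sorted() on any list.
assert patience_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]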
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
| 685 |
'''simple docstring'''
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self) -> None:
        '''simple docstring'''
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        '''simple docstring'''
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        '''simple docstring'''
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        '''simple docstring'''
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self) -> None:
        '''simple docstring'''
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        '''simple docstring'''
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        '''simple docstring'''
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        '''simple docstring'''
        return str(self.queue)
def fixed_priority_queue() -> None:
    '''simple docstring'''
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    '''simple docstring'''
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
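

# Hedged quick check (added): the element queue always yields the minimum first.
def _element_queue_order_check() -> None:
    epq = ElementPriorityQueue()
    for value in (3, 1, 2):
        epq.enqueue(value)
    assert [epq.dequeue(), epq.dequeue(), epq.dequeue()] == [1, 2, 3]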
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 1 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Estimated target values.\n    references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Ground truth (correct) target values.\n    sample_weight: array-like of shape (n_samples,), default=None\n        Sample weights.\n    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n        "raw_values" : Returns a full set of errors in case of multioutput input.\n\n        "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n    squared : bool, default=True\n        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n    mse : mean squared error.\nExamples:\n\n    >>> mse_metric = datasets.load_metric("mse")\n    >>> predictions = [2.5, 0.0, 2, 8]\n    >>> references = [3, -0.5, 2, 7]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.375}\n    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n    >>> print(rmse_result)\n    {\'mse\': 0.6123724356957945}\n\n    If you\'re using multi-dimensional lists, then set the config as follows :\n\n    >>> mse_metric = datasets.load_metric("mse", "multilist")\n    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n    >>> references = [[0, 2], [-1, 2], [8, -5]]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.7083333333333334}\n    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        '''simple docstring'''
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        '''simple docstring'''
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
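

# Hedged worked example (added), matching the docstring above: with predictions
# [2.5, 0.0, 2, 8] and references [3, -0.5, 2, 7],
# MSE = (0.25 + 0.25 + 0 + 1) / 4 = 0.375.
def _mse_by_hand() -> float:
    preds = [2.5, 0.0, 2.0, 8.0]
    refs = [3.0, -0.5, 2.0, 7.0]
    return sum((p - r) ** 2 for p, r in zip(preds, refs)) / len(preds)  # 0.375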
| 685 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    '''simple docstring'''
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        '''simple docstring'''
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        '''simple docstring'''
        ds = get_dataset()
        ds_dedup, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_dedup), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
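

# Hedged sketch (added): the 0.85 above is a Jaccard similarity threshold, which
# MinHash approximates; for exact sets Jaccard is |A ∩ B| / |A ∪ B|.
def _jaccard_sketch() -> float:
    a, b = {"a", "b", "c"}, {"b", "c", "d"}
    return len(a & b) / len(a | b)  # 2 / 4 = 0.5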
| 685 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 685 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        '''simple docstring'''
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
    def _lowerCAmelCase ( self: int , config: Any , pixel_values: Optional[int] , labels: int) ->Dict:
        '''simple docstring'''
        a_ = TFViTMAEModel(config=config)
        a_ = model(pixel_values , training=False)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def _lowerCAmelCase ( self: Union[str, Any] , config: str , pixel_values: Dict , labels: int) ->Dict:
        '''simple docstring'''
        a_ = TFViTMAEForPreTraining(config)
        a_ = model(pixel_values , training=False)
# expected sequence length = num_patches
a_ = (self.image_size // self.patch_size) ** 2
a_ = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
a_ = 1
        a_ = TFViTMAEForPreTraining(config)
        a_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        a_ = model(pixel_values , training=False)
a_ = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def _lowerCAmelCase ( self: Optional[Any]) ->List[str]:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
((a_) , (a_) , (a_)) = config_and_inputs
a_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_UpperCAmelCase ={'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
_UpperCAmelCase =False
_UpperCAmelCase =False
_UpperCAmelCase =False
_UpperCAmelCase =False
def _lowerCAmelCase ( self: Any) ->str:
'''simple docstring'''
a_ = TFViTMAEModelTester(self)
        a_ = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds")
def _lowerCAmelCase ( self: Any) ->Union[str, Any]:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(a)
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer))
a_ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer))
def _lowerCAmelCase ( self: int) ->List[str]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(a)
a_ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a)
def _lowerCAmelCase ( self: List[str]) ->List[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a)
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a)
def _lowerCAmelCase ( self: Union[str, Any]) ->Tuple:
'''simple docstring'''
np.random.seed(2)
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = int((config.image_size // config.patch_size) ** 2)
a_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
a_ = model_class(a)
a_ = self._prepare_for_class(a , a)
a_ = model(a , noise=a)
a_ = copy.deepcopy(self._prepare_for_class(a , a))
a_ = model(**a , noise=a)
a_ = outputs_dict[0].numpy()
a_ = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)) , 1e-6)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
np.random.seed(2)
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = int((config.image_size // config.patch_size) ** 2)
a_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
def prepare_numpy_arrays(a: Any):
a_ = {}
for k, v in inputs_dict.items():
if tf.is_tensor(a):
a_ = v.numpy()
else:
a_ = np.array(a)
return inputs_np_dict
for model_class in self.all_model_classes:
a_ = model_class(a)
a_ = self._prepare_for_class(a , a)
a_ = prepare_numpy_arrays(a)
a_ = model(a , noise=a)
a_ = model(**a , noise=a)
self.assert_outputs_same(a , a)
    def _lowerCAmelCase ( self: Any , tf_model: str , pt_model: str , inputs_dict: Optional[int]) ->int:
        '''simple docstring'''
        np.random.seed(2)
        a_ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        a_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        a_ = tf.constant(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        a_ = tf_noise
        super().check_pt_tf_models(tf_model , pt_model , inputs_dict)
def _lowerCAmelCase ( self: Tuple) ->List[Any]:
'''simple docstring'''
np.random.seed(2)
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(a)
if module_member_name.endswith("MainLayer")
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
for module_member in (getattr(a , a),)
if isinstance(a , a)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(a , "_keras_serializable" , a)
}
a_ = int((config.image_size // config.patch_size) ** 2)
a_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
a_ = tf.convert_to_tensor(a)
inputs_dict.update({"noise": noise})
for main_layer_class in tf_main_layer_classes:
a_ = main_layer_class(a)
a_ = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
a_ = tf.keras.Model(a , outputs=main_layer(a))
a_ = model(a)
with tempfile.TemporaryDirectory() as tmpdirname:
a_ = os.path.join(a , "keras_model.h5")
model.save(a)
a_ = tf.keras.models.load_model(
a , custom_objects={main_layer_class.__name__: main_layer_class})
assert isinstance(a , tf.keras.Model)
a_ = model(a)
self.assert_outputs_same(a , a)
@slow
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
np.random.seed(2)
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = int((config.image_size // config.patch_size) ** 2)
a_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
a_ = model_class(a)
a_ = self._prepare_for_class(a , a)
a_ = model(a , noise=a)
if model_class.__name__ == "TFViTMAEModel":
a_ = outputs.last_hidden_state.numpy()
a_ = 0
else:
a_ = outputs.logits.numpy()
a_ = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a , saved_model=a)
a_ = model_class.from_pretrained(a)
a_ = model(a , noise=a)
if model_class.__name__ == "TFViTMAEModel":
a_ = after_outputs["last_hidden_state"].numpy()
a_ = 0
else:
a_ = after_outputs["logits"].numpy()
a_ = 0
a_ = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(a , 1e-5)
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
np.random.seed(2)
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = int((config.image_size // config.patch_size) ** 2)
a_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
a_ = model_class(a)
a_ = self._prepare_for_class(a , a)
a_ = model(a , noise=a)
a_ = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(a)
a_ = model_class.from_config(model.get_config())
# make sure it also accepts a normal config
a_ = model_class.from_config(model.config)
a_ = new_model(a) # Build model
new_model.set_weights(model.get_weights())
a_ = new_model(a , noise=a)
self.assert_outputs_same(a , a)
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.")
def _lowerCAmelCase ( self: Union[str, Any]) ->Dict:
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
pass
@slow
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
self.assertIsNotNone(a)
def __UpperCAmelCase () -> List[str]:
'''simple docstring'''
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def _lowerCAmelCase ( self: int) ->List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
@slow
def _lowerCAmelCase ( self: Optional[int]) ->Tuple:
'''simple docstring'''
np.random.seed(2)
a_ = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=a , return_tensors="tf")
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
a_ = ViTMAEConfig()
a_ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
a_ = np.random.uniform(size=(1, num_patches))
# forward pass
a_ = model(**a , noise=a)
# verify the logits
a_ = tf.convert_to_tensor([1, 1_96, 7_68])
self.assertEqual(outputs.logits.shape , a)
a_ = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
tf.debugging.assert_near(outputs.logits[0, :3, :3] , a , atol=1e-4)
| 685 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a_ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def _lowerCAmelCase ( self: Any , a: Path , identifier: Union[str, None] = None , n_identifier: Union[List[str], None] = None , ignore_files: Union[str, List[str], None] = None , only_modules: bool = True , ) ->Optional[Any]:
        '''simple docstring'''
        a_ = [file for file in os.listdir(a) if os.path.isfile(os.path.join(a , file))]
if identifier is not None:
a_ = [file for file in files if identifier in file]
if n_identifier is not None:
            if isinstance(n_identifier , list):
for n_ in n_identifier:
a_ = [file for file in files if n_ not in file]
else:
a_ = [file for file in files if n_identifier not in file]
a_ = ignore_files or []
ignore_files.append("__init__.py")
a_ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , a)
if only_modules:
a_ = file.split(".")[0]
try:
                    a_ = getattr(transformers , module_identifier)
                    a_ = doctest.DocTestSuite(module_identifier)
                    a_ = unittest.TextTestRunner().run(suite)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""")
else:
a_ = doctest.testfile(str(".." / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "modeling"
a_ = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(a , identifier=a , ignore_files=a)
def _lowerCAmelCase ( self: int) ->Dict:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "tokenization"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "configuration"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: Union[str, Any]) ->Any:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = ["configuration", "modeling", "tokenization"]
self.analyze_directory(a , n_identifier=a)
def _lowerCAmelCase ( self: Optional[int]) ->Tuple:
'''simple docstring'''
a_ = Path("docs/source")
a_ = ["favicon.ico"]
self.analyze_directory(a , ignore_files=a , only_modules=a)
| 685 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE__ :
    def __init__( self: str , parent: Dict , batch_size: Dict=13 , image_size: Any=30 , patch_size: Dict=2 , num_channels: Any=3 , is_training: str=True , use_labels: Optional[int]=True , hidden_size: int=32 , num_hidden_layers: List[str]=5 , num_attention_heads: str=4 , intermediate_size: str=37 , hidden_act: Tuple="gelu" , hidden_dropout_prob: Tuple=0.1 , attention_probs_dropout_prob: Tuple=0.1 , type_sequence_label_size: Tuple=10 , initializer_range: Tuple=0.02 , scope: Optional[int]=None , encoder_stride: List[str]=2 , ) ->Tuple:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = is_training
a_ = use_labels
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = type_sequence_label_size
a_ = initializer_range
a_ = scope
a_ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a_ = (image_size // patch_size) ** 2
a_ = num_patches + 1
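        # Worked example with this tester's defaults (note added for clarity):
        # image_size=30, patch_size=2 -> (30 // 2) ** 2 = 225 patches, so the
        # sequence length is 225 + 1 = 226 once the [CLS] token is counted.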
def _lowerCAmelCase ( self: Dict) ->Dict:
'''simple docstring'''
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self: Tuple) ->int:
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def _lowerCAmelCase ( self: str , config: Union[str, Any] , pixel_values: List[str] , labels: Dict) ->Optional[int]:
        '''simple docstring'''
        a_ = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        a_ = model(pixel_values)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def _lowerCAmelCase ( self: List[str] , config: Tuple , pixel_values: str , labels: Any) ->str:
        '''simple docstring'''
        a_ = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        a_ = model(pixel_values)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a_ = 1
        a_ = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        a_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        a_ = model(pixel_values)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
    def _lowerCAmelCase ( self: Any , config: Union[str, Any] , pixel_values: str , labels: Optional[int]) ->int:
        '''simple docstring'''
        a_ = self.type_sequence_label_size
        a_ = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        a_ = model(pixel_values , labels=labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a_ = 1
        a_ = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        a_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        a_ = model(pixel_values)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str) ->str:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
        ((a_) , (a_) , (a_)) = config_and_inputs
a_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase =True
_UpperCAmelCase =False
_UpperCAmelCase =False
_UpperCAmelCase =False
def _lowerCAmelCase ( self: List[Any]) ->Tuple:
'''simple docstring'''
a_ = ViTModelTester(self)
        a_ = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37)
def _lowerCAmelCase ( self: List[Any]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds")
def _lowerCAmelCase ( self: Optional[int]) ->List[str]:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: str) ->Dict:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(a)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a_ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))
def _lowerCAmelCase ( self: Union[str, Any]) ->int:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(a)
a_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a)
def _lowerCAmelCase ( self: Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a)
def _lowerCAmelCase ( self: Any) ->List[str]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a)
def _lowerCAmelCase ( self: str) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a)
@slow
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = ViTModel.from_pretrained(a)
self.assertIsNotNone(a)
def __UpperCAmelCase () -> List[str]:
'''simple docstring'''
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
@slow
def _lowerCAmelCase ( self: Optional[int]) ->Optional[int]:
'''simple docstring'''
a_ = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(a)
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=a , return_tensors="pt").to(a)
# forward pass
with torch.no_grad():
a_ = model(**a)
# verify the logits
a_ = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , a)
a_ = torch.tensor([-0.2744, 0.8215, -0.0836]).to(a)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4))
@slow
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
a_ = ViTModel.from_pretrained("facebook/dino-vits8").to(a)
a_ = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=4_80)
a_ = prepare_img()
a_ = image_processor(images=a , return_tensors="pt")
a_ = inputs.pixel_values.to(a)
# forward pass
with torch.no_grad():
a_ = model(a , interpolate_pos_encoding=a)
# verify the logits
a_ = torch.Size((1, 36_01, 3_84))
self.assertEqual(outputs.last_hidden_state.shape , a)
a_ = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(a)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
a_ = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto")
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=a , return_tensors="pt")
a_ = inputs.pixel_values.to(a)
# forward pass to make sure inference works in fp16
with torch.no_grad():
a_ = model(a)
| 685 |
'''simple docstring'''
def __UpperCAmelCase (n = 100 ) -> int:
'''simple docstring'''
a_ = n * (n + 1) * (2 * n + 1) / 6
a_ = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
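def brute_force_solution(n: int = 100) -> int:
    '''simple docstring'''
    # O(n) cross-check added for illustration (this helper is not part of the
    # original file): compute (1 + ... + n)**2 - (1**2 + ... + n**2) directly.
    # For n = 100 both this and the closed form above return 25164150.
    numbers = range(1, n + 1)
    return sum(numbers) ** 2 - sum(k * k for k in numbers)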
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 1 |
'''simple docstring'''
def __UpperCAmelCase (arr ) -> list:
    '''simple docstring'''
    a_ = len(arr )
    for _ in range(arr_size ):
for i in range(_ % 2 ,arr_size - 1 ,2 ):
if arr[i + 1] < arr[i]:
a_ , a_ = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
a_ = list(range(10, 0, -1))
print(F'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
| 685 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =(PNDMScheduler,)
_UpperCAmelCase =(('''num_inference_steps''', 50),)
def _lowerCAmelCase ( self: int , **a: Optional[int]) ->Any:
'''simple docstring'''
a_ = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a)
return config
    def _lowerCAmelCase ( self: Any , a: Tuple=0 , **kwargs: Any) ->Any:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
new_scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
            a_ = scheduler.step_prk(residual , a , sample , **kwargs).prev_sample
            a_ = new_scheduler.step_prk(residual , a , sample , **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            a_ = scheduler.step_plms(residual , a , sample , **kwargs).prev_sample
            a_ = new_scheduler.step_plms(residual , a , sample , **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: str) ->Any:
'''simple docstring'''
pass
    def _lowerCAmelCase ( self: Union[str, Any] , a: str=0 , **kwargs: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
# copy over dummy past residuals
new_scheduler.set_timesteps(a)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[:]
            a_ = scheduler.step_prk(residual , a , sample , **kwargs).prev_sample
            a_ = new_scheduler.step_prk(residual , a , sample , **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            a_ = scheduler.step_plms(residual , a , sample , **kwargs).prev_sample
            a_ = new_scheduler.step_plms(residual , a , sample , **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: Dict , **a: int) ->Any:
'''simple docstring'''
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(a)
for i, t in enumerate(scheduler.prk_timesteps):
a_ = model(a , a)
a_ = scheduler.step_prk(a , a , a).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a_ = model(a , a)
a_ = scheduler.step_plms(a , a , a).prev_sample
return sample
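    # Note added for clarity: the helper above mirrors PNDM's two-phase schedule -
    # a short Runge-Kutta warm-up over `prk_timesteps` builds up the history of
    # model outputs, then the cheaper linear multistep updates over
    # `plms_timesteps` finish the denoising loop.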
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = self.dummy_sample
a_ = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
            a_ = scheduler.step_prk(residual , 0 , sample , **kwargs).prev_sample
            a_ = scheduler.step_prk(residual , 1 , sample , **kwargs).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
            a_ = scheduler.step_plms(residual , 0 , sample , **kwargs).prev_sample
            a_ = scheduler.step_plms(residual , 1 , sample , **kwargs).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(steps_offset=1)
a_ = scheduler_class(**a)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1]) , )
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=a , beta_end=a)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=a)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=a)
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = 27
for scheduler_class in self.scheduler_classes:
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a_ = scheduler.step_prk(a , a , a).prev_sample
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
with self.assertRaises(a):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.full_loop()
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = self.full_loop(prediction_type="v_prediction")
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 | 1 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self: List[Any] , a: list[int]) ->None:
'''simple docstring'''
a_ = len(a)
a_ = [0] * len_array
if len_array > 0:
a_ = array[0]
            for i in range(1 , len_array):
a_ = self.prefix_sum[i - 1] + array[i]
def _lowerCAmelCase ( self: Optional[Any] , a: int , a: int) ->int:
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def _lowerCAmelCase ( self: Optional[Any] , a: int) ->bool:
'''simple docstring'''
a_ = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(a)
return False
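# Note added for clarity: both methods above were renamed to `_lowerCAmelCase`,
# so at runtime the subset-sum check shadows the range-sum query; only the
# single-argument `_lowerCAmelCase(target_sum)` remains callable on an instance.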
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Optional[int]) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
        a_ , a_ = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny" , from_pt=True , dtype=jnp.bfloataa)
        a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , controlnet=controlnet , from_pt=True , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "bird"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
a_ = pipe.prepare_image_inputs([canny_image] * num_samples)
a_ = jax.random.PRNGKey(0)
        a_ = jax.random.split(rng , jax.device_count())
        a_ = replicate(params)
        a_ = shard(prompt_ids)
        a_ = shard(processed_image)
        a_ = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
        a_ , a_ = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose" , from_pt=True , dtype=jnp.bfloataa)
        a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , controlnet=controlnet , from_pt=True , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "Chef in the kitchen"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
a_ = pipe.prepare_image_inputs([pose_image] * num_samples)
a_ = jax.random.PRNGKey(0)
        a_ = jax.random.split(rng , jax.device_count())
        a_ = replicate(params)
        a_ = shard(prompt_ids)
        a_ = shard(pose_image)
        a_ = pipe(
            prompt_ids=prompt_ids , image=pose_image , params=params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 1 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ = 1000 ) -> int:
'''simple docstring'''
return sum(e for e in range(3 ,lowercase__ ) if e % 3 == 0 or e % 5 == 0 )
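# e.g. solution(10) == 23, i.e. 3 + 5 + 6 + 9 (illustrative note added for clarity).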
if __name__ == "__main__":
print(F'{solution() = }')
| 685 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ = 1000 ) -> int:
'''simple docstring'''
return sum(e for e in range(3 ,lowercase__ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 1 |
'''simple docstring'''
def __UpperCAmelCase (n ,allow_probable = False ) -> bool:
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
a_ = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
a_ = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds ,1 ):
if n < _p:
# then we have our last prime to check
a_ = primes[:idx]
break
a_ , a_ = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
a_ = False
        for r in range(s ):
            a_ = pow(prime ,d * 2**r ,n )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
a_ = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
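# Worked example (added for illustration): for n = 561, n - 1 = 560 = 35 * 2**4,
# so d = 35 and s = 4. The smallest base-2 strong pseudoprime is 2047, so the
# witness 2 alone already exposes the Carmichael number 561 as composite - which
# is exactly what test_miller_rabin() below asserts.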
def __UpperCAmelCase () -> None:
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 685 |
'''simple docstring'''
import math
def __UpperCAmelCase (n ) -> list:
'''simple docstring'''
a_ = [True] * n
a_ = False
a_ = False
a_ = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
a_ = i * 2
while index < n:
a_ = False
a_ = index + i
a_ = [2]
for i in range(3 ,lowercase__ ,2 ):
if is_prime[i]:
primes.append(lowercase__ )
return primes
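# e.g. prime_sieve(10) -> [2, 3, 5, 7] (illustrative note added for clarity).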
def __UpperCAmelCase (limit = 999966663333 ) -> int:
    '''simple docstring'''
    a_ = math.floor(math.sqrt(limit ) ) + 100
    a_ = prime_sieve(sieve_bound )
a_ = 0
a_ = 0
a_ = primes[prime_index]
while (last_prime**2) <= limit:
a_ = primes[prime_index + 1]
a_ = last_prime**2
a_ = next_prime**2
# Get numbers divisible by lps(current)
a_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
a_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
a_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
a_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 685 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a_ = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __UpperCAmelCase (base_model_name ,hf_config ,downstream_dict ) -> Any:
    '''simple docstring'''
    a_ = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name ,config=hf_config )
a_ = downstream_dict["projector.weight"]
a_ = downstream_dict["projector.bias"]
a_ = downstream_dict["model.post_net.linear.weight"]
a_ = downstream_dict["model.post_net.linear.bias"]
return model
def __UpperCAmelCase (base_model_name ,hf_config ,downstream_dict ) -> Dict:
    '''simple docstring'''
    a_ = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name ,config=hf_config )
a_ = downstream_dict["model.linear.weight"]
a_ = downstream_dict["model.linear.bias"]
return model
def __UpperCAmelCase (base_model_name ,hf_config ,downstream_dict ) -> Optional[Any]:
    '''simple docstring'''
    a_ = UniSpeechSatForXVector.from_pretrained(base_model_name ,config=hf_config )
a_ = downstream_dict["connector.weight"]
a_ = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
a_ = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
a_ = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
a_ = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __UpperCAmelCase (base_model_name ,config_path ,checkpoint_path ,model_dump_path ) -> List[str]:
    '''simple docstring'''
    a_ = torch.load(checkpoint_path ,map_location="cpu" )
    a_ = checkpoint["Downstream"]
    a_ = UniSpeechSatConfig.from_pretrained(config_path )
    a_ = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name ,return_attention_mask=True ,do_normalize=False )
    a_ = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        a_ = convert_classification(base_model_name ,hf_config ,downstream_dict )
    elif arch.endswith("ForAudioFrameClassification" ):
        a_ = convert_diarization(base_model_name ,hf_config ,downstream_dict )
    elif arch.endswith("ForXVector" ):
        a_ = convert_xvector(base_model_name ,hf_config ,downstream_dict )
    else:
        raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
    if hf_config.use_weighted_layer_sum:
        a_ = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 685 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
_UpperCAmelCase ='''maskformer-swin'''
_UpperCAmelCase ={
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__( self: int , image_size: Optional[int]=2_24 , patch_size: Optional[Any]=4 , num_channels: Any=3 , embed_dim: int=96 , depths: Any=[2, 2, 6, 2] , num_heads: Any=[3, 6, 12, 24] , window_size: Dict=7 , mlp_ratio: Optional[Any]=4.0 , qkv_bias: str=True , hidden_dropout_prob: Optional[Any]=0.0 , attention_probs_dropout_prob: List[Any]=0.0 , drop_path_rate: str=0.1 , hidden_act: Optional[Any]="gelu" , use_absolute_embeddings: Optional[int]=False , initializer_range: str=0.02 , layer_norm_eps: Optional[int]=1e-5 , out_features: Optional[Any]=None , out_indices: Optional[int]=None , **kwargs: List[str] , ) ->List[str]:
        '''simple docstring'''
        super().__init__(**kwargs)
a_ = image_size
a_ = patch_size
a_ = num_channels
a_ = embed_dim
a_ = depths
        a_ = len(depths)
a_ = num_heads
a_ = window_size
a_ = mlp_ratio
a_ = qkv_bias
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = drop_path_rate
a_ = hidden_act
a_ = use_absolute_embeddings
a_ = layer_norm_eps
a_ = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
        a_ = int(embed_dim * 2 ** (len(depths) - 1))
        a_ = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(depths) + 1)]
        a_ , a_ = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
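        # Note added for clarity: with the defaults embed_dim=96 and
        # depths=[2, 2, 6, 2], the hidden size computed above is 96 * 2**3 = 768.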
| 685 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 685 | 1 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __UpperCAmelCase (lowercase__ ) -> Optional[Any]:
'''simple docstring'''
if is_torch_version("<" ,"2.0.0" ) or not hasattr(lowercase__ ,"_dynamo" ):
return False
return isinstance(lowercase__ ,torch._dynamo.eval_frame.OptimizedModule )
def __UpperCAmelCase (model ,keep_fpaa_wrapper = True ) -> Optional[Any]:
    '''simple docstring'''
    a_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    a_ = is_compiled_module(model )
    if is_compiled:
        a_ = model
        a_ = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model ,options ):
        a_ = model.module
    if not keep_fpaa_wrapper:
        a_ = getattr(model ,"forward" )
        a_ = model.__dict__.pop("_original_forward" ,None )
        if original_forward is not None:
            while hasattr(forward ,"__wrapped__" ):
                a_ = forward.__wrapped__
                if forward == original_forward:
                    break
            a_ = forward
        if getattr(model ,"_converted_to_transformer_engine" ,False ):
            convert_model(model ,to_transformer_engine=False )
    if is_compiled:
        a_ = model
        a_ = compiled_model
    return model
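# Note added for clarity: the helper above peels DataParallel/DistributedDataParallel
# (and, when available, DeepSpeedEngine) wrappers by following `.module`, restores a
# torch.compile-d model to its `_orig_mod`, and can swap a patched mixed-precision
# `forward` back to the original one before returning the bare model.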
def __UpperCAmelCase () -> Dict:
'''simple docstring'''
PartialState().wait_for_everyone()
def __UpperCAmelCase (obj ,f ) -> str:
    '''simple docstring'''
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj ,f )
    elif PartialState().local_process_index == 0:
        torch.save(obj ,f )
@contextmanager
def __UpperCAmelCase (**lowercase__ ) -> Any:
'''simple docstring'''
for key, value in kwargs.items():
a_ = str(lowercase__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __UpperCAmelCase (lowercase__ ) -> str:
'''simple docstring'''
if not hasattr(lowercase__ ,"__qualname__" ) and not hasattr(lowercase__ ,"__name__" ):
a_ = getattr(lowercase__ ,"__class__" ,lowercase__ )
if hasattr(lowercase__ ,"__qualname__" ):
return obj.__qualname__
if hasattr(lowercase__ ,"__name__" ):
return obj.__name__
return str(lowercase__ )
def __UpperCAmelCase (source ,destination ) -> Optional[Any]:
    '''simple docstring'''
    for key, value in source.items():
        if isinstance(value ,dict ):
            a_ = destination.setdefault(key ,{} )
            merge_dicts(value ,node )
        else:
            a_ = value
return destination
def __UpperCAmelCase (port = None ) -> bool:
    '''simple docstring'''
    if port is None:
a_ = 29500
with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s:
return s.connect_ex(("localhost", port) ) == 0
| 685 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
a_ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
a_ = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def __UpperCAmelCase (vocab_file ,emoji_file ) -> Tuple:
    '''simple docstring'''
    with open(emoji_file ,"r" ,encoding="utf-8" ) as f:
a_ = json.loads(f.read() )
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.readlines()
a_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ = b
a_ = idx
for wd in b:
a_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase =['''input_ids''', '''attention_mask''']
    def __init__( self: List[str] , vocab_file: Union[str, Any] , emoji_file: Optional[int] , unk_token: List[str]="<|endoftext|>" , pad_token: Union[str, Any]="<|endoftext|>" , bos_token: Dict="<|startoftext|>" , eos_token: Dict="<|endoftext|>" , do_clean_text: Union[str, Any]=False , **kwargs: Optional[int] , ) ->str:
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        a_ = do_clean_text
        a_ , a_ , a_ , a_ = load_vocab_and_emoji(vocab_file , emoji_file)
a_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return len(self.raw_vocab)
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any) ->Dict:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def _lowerCAmelCase ( self: int , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a)
def _lowerCAmelCase ( self: Optional[int] , a: Any) ->str:
'''simple docstring'''
a_ = "".join(a).strip()
return out_string
def _lowerCAmelCase ( self: Any , a: "Conversation") ->List[int]:
'''simple docstring'''
a_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
a_ = input_ids[-self.model_max_length :]
return input_ids
def _lowerCAmelCase ( self: int , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = 0
if os.path.isdir(a):
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(a , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ = token_index
writer.write(",".join(a) + "\n")
index += 1
with open(a , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , a)
return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
    def __init__( self: List[str] , vocab: Any , ids_to_tokens: Union[str, Any] , emoji: Any) ->List[Any]:
'''simple docstring'''
a_ = vocab # same as swe
a_ = ids_to_tokens # same as bpe
a_ = emoji
        a_ = np.max([len(w) for w in self.vocab.keys()])
a_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Dict) ->Any:
'''simple docstring'''
return len(self.ids_to_tokens)
    def _lowerCAmelCase ( self: Union[str, Any] , content: Tuple) ->Any:
        '''simple docstring'''
        a_ = self.content_repattera.sub("<URL>" , content)
        a_ = self.content_repattera.sub("<EMAIL>" , content)
        a_ = self.content_repattera.sub("<TEL>" , content)
        a_ = self.content_repattera.sub("<DATE>" , content)
        a_ = self.content_repattera.sub("<DATE>" , content)
        a_ = self.content_repattera.sub("<PRICE>" , content)
a_ = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
    def _lowerCAmelCase ( self: Any , text: int , clean: Optional[int]=False) ->List[str]:
'''simple docstring'''
a_ = text.replace(" " , "<SP>")
a_ = text.replace(" " , "<SP>")
a_ = text.replace("\r\n" , "<BR>")
a_ = text.replace("\n" , "<BR>")
a_ = text.replace("\r" , "<BR>")
a_ = text.replace("\t" , "<TAB>")
a_ = text.replace("—" , "ー")
a_ = text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
                a_ = text.replace(k , v)
if clean:
            a_ = self.clean_text(text)
def check_simbol(a: Dict):
a_ = x.encode()
            if len(a) == 1 and len(e) == 2:
a_ = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC_2_A_1 and c <= 0XC_2_B_F)
or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3)
or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F)
or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2)
):
return True
return False
def checkuae(a: str):
a_ = x.encode()
            if len(a) == 1 and len(e) == 3:
a_ = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F:
return True
return False
a_ = 0
a_ = []
        while pos < len(text):
            a_ = min(len(text) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            a_ = [] # (token_id, token, pos)
            for e in range(end , pos , -1):
                a_ = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        a_ = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                a_ , a_ , a_ = sorted(candidates , key=lambda x: x[0])[0]
                result.append(wd)
                a_ = e
            else:
                a_ = pos + 1
                a_ = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checkuae(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                a_ = end
return result
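    # Note added for clarity: the loop above is a greedy longest-match-first search -
    # at each position it scans candidate substrings from the longest (bounded by the
    # longest vocabulary entry) down to a single character, keeps the match with the
    # smallest token id, and falls back to raw "<|byte%d|>" tokens when nothing matches.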
    def _lowerCAmelCase ( self: int , index: List[Any] , breakline: Any="\n") ->str:
'''simple docstring'''
a_ = []
a_ = []
a_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8" , errors="replace"))
                a_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
                words.append(breakline)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(a)
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = "".join(a)
return text
| 685 | 1 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a_ = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
a_ = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
a_ = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def _lowerCAmelCase ( self: Optional[Any]) ->MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token") , id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token") , id="sequence") , id="references"),
}) , )
def _lowerCAmelCase ( self: List[str] , a: List[List[List[str]]] , a: List[List[str]] , a: int = 1 , a: int = 4 , ) ->Dict[str, float]:
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=a , hypotheses=a , min_len=a , max_len=a)
}
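# Minimal standalone sketch of what compute() wraps: nltk's corpus_gleu over
# tokenized hypotheses and their (possibly multiple) references. The sentences
# are illustrative only.
from nltk.translate import gleu_score

hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
list_of_references = [[["the", "cat", "is", "on", "the", "mat"]]]
score = gleu_score.corpus_gleu(
    list_of_references=list_of_references, hypotheses=hypotheses, min_len=1, max_len=4
)
print(round(score, 2))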
| 685 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Optional[Any] , a: Any , a: Dict , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , ) ->int:
'''simple docstring'''
a_ = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(a , start_positions=a , end_positions=a)
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Union[str, Any] , a: Any , a: Tuple , a: Union[str, Any] , a: int , a: int , a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str , a: List[str] , a: Dict , a: Tuple , a: Optional[Any] , a: Any , a: Any , a: str , a: str , a: Optional[Any] , ) ->List[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: List[Any] , a: Dict , a: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[str] , a: Tuple , ) ->Dict:
'''simple docstring'''
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
(a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_) = config_and_inputs
a_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Any , a: List[str] , a: Union[str, Any] , a: int) ->int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: List[Any] , a: Tuple=False) ->List[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModelTester(self)
a_ = ConfigTester(self , config_class=a , emb_dim=37)
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a)
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a)
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = FlaubertModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt"))
a_ = torch.jit.load(os.path.join(a , "traced_model.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
a_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
a_ = model(a)[0]
a_ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4))
| 685 | 1 |
'''simple docstring'''
a_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def __UpperCAmelCase (lowercase__ ) -> bytes:
'''simple docstring'''
if not isinstance(lowercase__ ,lowercase__ ):
a_ = F"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(lowercase__ )
a_ = "".join(bin(lowercase__ )[2:].zfill(8 ) for byte in data )
a_ = len(lowercase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
a_ = b"=" * ((6 - len(lowercase__ ) % 6) // 2)
# Pad binary_stream with "0" bits so that its length becomes a multiple of 6.
binary_stream += "0" * (6 - len(lowercase__ ) % 6)
else:
a_ = b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] ,2 )]
for index in range(0 ,len(lowercase__ ) ,6 ) ).encode()
+ padding
)
def __UpperCAmelCase (lowercase__ ) -> bytes:
'''simple docstring'''
if not isinstance(lowercase__ ,lowercase__ ) and not isinstance(lowercase__ ,lowercase__ ):
a_ = (
"argument should be a bytes-like object or ASCII string, "
F"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(lowercase__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(lowercase__ ,lowercase__ ):
try:
a_ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
a_ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(lowercase__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
a_ = encoded_data[:-padding]
a_ = "".join(
bin(B64_CHARSET.index(lowercase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
a_ = "".join(
bin(B64_CHARSET.index(lowercase__ ) )[2:].zfill(6 ) for char in encoded_data )
a_ = [
int(binary_stream[index : index + 8] ,2 )
for index in range(0 ,len(lowercase__ ) ,8 )
]
return bytes(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
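# Round-trip sanity check against the standard library (illustrative sketch:
# base64_encode / base64_decode are hypothetical bindings for the encoder and
# decoder above, which this file defines under one obfuscated name).
import base64

sample = b"base64 round trip"
assert base64_encode(sample) == base64.b64encode(sample)
assert base64_decode(base64.b64encode(sample)) == sample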
| 685 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(lowercase__ ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCAmelCase (lowercase__ = 10001 ) -> int:
'''simple docstring'''
try:
a_ = int(lowercase__ )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
a_ = []
a_ = 2
while len(lowercase__ ) < nth:
if is_prime(lowercase__ ):
primes.append(lowercase__ )
num += 1
else:
num += 1
return primes[len(lowercase__ ) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
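# Spot check for the 6k +/- 1 primality helper (illustrative; is_prime is the
# name the solution body above already calls it by):
assert [n for n in range(2, 30) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]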
| 685 | 1 |
'''simple docstring'''
a_ = 'Alexander Joslin'
import operator as op
from .stack import Stack
def __UpperCAmelCase (lowercase__ ) -> int:
'''simple docstring'''
a_ = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
a_ = Stack()
a_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(lowercase__ ) )
elif i in operators:
# RULE 2
operator_stack.push(lowercase__ )
elif i == ")":
# RULE 4
a_ = operator_stack.peek()
operator_stack.pop()
a_ = operand_stack.peek()
operand_stack.pop()
a_ = operand_stack.peek()
operand_stack.pop()
a_ = operators[opr](lowercase__ ,lowercase__ )
operand_stack.push(lowercase__ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
a_ = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(F'{equation} = {dijkstras_two_stack_algorithm(equation)}')
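# A second worked case (sketch): input must be fully parenthesised with
# single-digit operands, since the loop dispatches one character at a time.
# Inner groups reduce first: ((3 * 4) - (2 + 5)) -> 12 - 7 -> 5.
another = "((3 * 4) - (2 + 5))"
print(f"{another} = {dijkstras_two_stack_algorithm(another)}")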
| 685 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''visual_bert'''
def __init__( self: Union[str, Any] , a: List[Any]=3_05_22 , a: List[Any]=7_68 , a: Union[str, Any]=5_12 , a: List[str]=12 , a: Tuple=12 , a: Optional[Any]=30_72 , a: int="gelu" , a: Union[str, Any]=0.1 , a: int=0.1 , a: str=5_12 , a: Optional[int]=2 , a: List[str]=0.02 , a: Optional[int]=1e-12 , a: str=False , a: Any=True , a: Tuple=1 , a: Dict=0 , a: Any=2 , **a: Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
a_ = vocab_size
a_ = max_position_embeddings
a_ = hidden_size
a_ = visual_embedding_dim
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = type_vocab_size
a_ = layer_norm_eps
a_ = bypass_transformer
a_ = special_visual_initialize
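# Usage sketch: in transformers this class ships as VisualBertConfig, whose
# defaults correspond to the uclanlp/visualbert-vqa-coco-pre configuration.
from transformers import VisualBertConfig

cfg = VisualBertConfig(visual_embedding_dim=512)
print(cfg.model_type, cfg.hidden_size, cfg.visual_embedding_dim)  # visual_bert 768 512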
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Any , a: list[str]) ->str:
'''simple docstring'''
a_ = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []})
for keyword in keywords:
self.add_keyword(a)
self.set_fail_transitions()
def _lowerCAmelCase ( self: Optional[int] , a: int , a: str) ->int | None:
'''simple docstring'''
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def _lowerCAmelCase ( self: List[str] , a: str) ->None:
'''simple docstring'''
a_ = 0
for character in keyword:
a_ = self.find_next_state(a , a)
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
})
self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
a_ = len(self.adlist) - 1
else:
a_ = next_state
self.adlist[current_state]["output"].append(a)
def _lowerCAmelCase ( self: Dict) ->None:
'''simple docstring'''
a_ = deque()
for node in self.adlist[0]["next_states"]:
q.append(a)
a_ = 0
while q:
a_ = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(a)
a_ = self.adlist[r]["fail_state"]
while (
self.find_next_state(a , self.adlist[child]["value"]) is None
and state != 0
):
a_ = self.adlist[state]["fail_state"]
a_ = self.find_next_state(
a , self.adlist[child]["value"])
if self.adlist[child]["fail_state"] is None:
a_ = 0
a_ = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def _lowerCAmelCase ( self: List[str] , a: str) ->dict[str, list[int]]:
'''simple docstring'''
a_ = {} # returns a dict with keywords and list of its occurrences
a_ = 0
for i in range(len(a)):
while (
self.find_next_state(a , string[i]) is None
and current_state != 0
):
a_ = self.adlist[current_state]["fail_state"]
a_ = self.find_next_state(a , string[i])
if next_state is None:
a_ = 0
else:
a_ = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
a_ = []
result[key].append(i - len(a) + 1)
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
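# Worked example (sketch): Automaton is a hypothetical binding for the class
# above and search_in the conventional name of its last method; both are
# hidden by the obfuscated names in this file. Values are match start indices.
automaton = Automaton(["he", "she", "his", "hers"])
print(automaton.search_in("ahishers"))
# -> {'his': [1], 'she': [3], 'he': [4], 'hers': [4]}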
| 685 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) ,np.inf )
a_ = 0
a_ = np.empty((rows, cols) ,dtype=lowercase__ )
a_ = None
while queue:
((a_) , (a_)) = heappop(lowercase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(lowercase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(lowercase__ ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(lowercase__ ,(dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
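# Small worked grid (sketch): dijkstra(grid, source, destination,
# allow_diagonal) is an assumed binding and argument order, since the
# signature above is obfuscated. 1 marks a passable cell, 0 a wall.
import numpy as np

maze = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
dist, path = dijkstra(maze, (0, 0), (2, 0), False)
print(dist)  # 6.0
print(path)  # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]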
| 685 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',
'ChineseCLIPTextConfig',
'ChineseCLIPVisionConfig',
],
'processing_chinese_clip': ['ChineseCLIPProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['ChineseCLIPFeatureExtractor']
a_ = ['ChineseCLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ChineseCLIPModel',
'ChineseCLIPPreTrainedModel',
'ChineseCLIPTextModel',
'ChineseCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
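# The _LazyModule indirection keeps `import transformers.models.chinese_clip`
# cheap: the torch- and vision-backed submodules are only imported when one of
# the names listed in _import_structure is first accessed.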
| 685 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
with open(lowercase__ ) as metadata_file:
a_ = json.load(lowercase__ )
a_ = LukeConfig(use_entity_aware_attention=lowercase__ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
a_ = torch.load(lowercase__ ,map_location="cpu" )["module"]
# Load the entity vocab file
a_ = load_original_entity_vocab(lowercase__ )
# add an entry for [MASK2]
a_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a_ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
a_ = AddedToken("<ent>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
a_ = AddedToken("<ent2>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"r" ) as f:
a_ = json.load(lowercase__ )
a_ = "MLukeTokenizer"
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
with open(os.path.join(lowercase__ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
a_ = tokenizer.convert_tokens_to_ids(["@"] )[0]
a_ = tokenizer.convert_tokens_to_ids(["#"] )[0]
a_ = state_dict["embeddings.word_embeddings.weight"]
a_ = word_emb[ent_init_index].unsqueeze(0 )
a_ = word_emb[enta_init_index].unsqueeze(0 )
a_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a_ = state_dict[bias_name]
a_ = decoder_bias[ent_init_index].unsqueeze(0 )
a_ = decoder_bias[enta_init_index].unsqueeze(0 )
a_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a_ = F"""encoder.layer.{layer_index}.attention.self."""
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ = state_dict["entity_embeddings.entity_embeddings.weight"]
a_ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a_ = state_dict["entity_predictions.bias"]
a_ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
a_ = LukeForMaskedLM(config=lowercase__ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
a_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
a_ = state_dict[key]
else:
a_ = state_dict[key]
a_ , a_ = model.load_state_dict(lowercase__ ,strict=lowercase__ )
if set(lowercase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a_ = MLukeTokenizer.from_pretrained(lowercase__ ,task="entity_classification" )
a_ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
a_ = (0, 9)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 33, 768) )
a_ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 1, 768) )
a_ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
a_ = "Tokyo is the capital of <mask>."
a_ = (24, 30)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
a_ = encoding["input_ids"][0].tolist()
a_ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
a_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase__ )
a_ = outputs.entity_logits[0][0].argmax().item()
a_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def __UpperCAmelCase (lowercase__ ) -> Any:
'''simple docstring'''
a_ = ["[MASK]", "[PAD]", "[UNK]"]
a_ = [json.loads(lowercase__ ) for line in open(lowercase__ )]
a_ = {}
for entry in data:
a_ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
a_ = entity_id
break
a_ = F"""{language}:{entity_name}"""
a_ = entity_id
return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 685 | 1 |
'''simple docstring'''
import qiskit
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> qiskit.result.counts.Counts:
'''simple docstring'''
a_ = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
a_ = qiskit.QuantumCircuit(lowercase__ ,lowercase__ )
# Map the quantum measurement to the classical bits
circuit.measure([0] ,[0] )
# Execute the circuit on the simulator
a_ = qiskit.execute(lowercase__ ,lowercase__ ,shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowercase__ )
if __name__ == "__main__":
print(F'Total count for various states are: {single_qubit_measure(1, 1)}')
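# With no gates before the measurement, the qubit never leaves |0>, so all
# 1000 shots collapse to the same bitstring and the printed Counts is
# {'0': 1000}.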
| 685 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =LxmertTokenizer
_UpperCAmelCase =LxmertTokenizerFast
_UpperCAmelCase =True
_UpperCAmelCase =True
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
super().setUp()
a_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _lowerCAmelCase ( self: Optional[Any] , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = "UNwant\u00E9d,running"
a_ = "unwanted, running"
return input_text, output_text
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = self.tokenizer_class(self.vocab_file)
a_ = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(a , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , [7, 4, 5, 10, 8, 9])
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer()
a_ = "I was born in 92000, and this is falsé."
a_ = tokenizer.tokenize(a)
a_ = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
a_ = tokenizer.encode(a , add_special_tokens=a)
a_ = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
a_ = self.get_rust_tokenizer()
a_ = tokenizer.encode(a)
a_ = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
| 685 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
a_ = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
a_ = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
model.to(a)
from datasets import load_dataset
a_ = load_dataset("nielsr/rvlcdip-demo")
a_ = dataset["train"][0]["image"].convert("RGB")
a_ = image_processor(a , return_tensors="pt").to(a)
# forward pass
with torch.no_grad():
a_ = model(**a)
a_ = outputs.logits
a_ = torch.Size((1, 16))
self.assertEqual(logits.shape , a)
a_ = torch.tensor(
[-0.4158, -0.4092, -0.4347] , device=a , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4))
| 685 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 | 1 |
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
a_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: Tuple , a: int = 1_01) ->Tuple:
'''simple docstring'''
a_ = length
def __len__( self: Tuple) ->Any:
'''simple docstring'''
return self.length
def __getitem__( self: Union[str, Any] , a: Union[str, Any]) ->int:
'''simple docstring'''
return i
class SCREAMING_SNAKE_CASE__ :
def __call__( self: Union[str, Any] , a: Optional[int]) ->str:
'''simple docstring'''
return {"input_ids": torch.tensor(a), "labels": torch.tensor(a)}
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self: int) ->Optional[int]:
'''simple docstring'''
super().__init__()
# Add some (unused) params otherwise DDP will complain.
a_ = nn.Linear(1_20 , 80)
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[Any] , a: Union[str, Any]=None) ->Dict:
'''simple docstring'''
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
@require_torch_neuroncore
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
a_ = f"""--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""--output_dir {output_dir}""".split()
a_ = ["torchrun"] + distributed_args + args
execute_subprocess_async(a , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
@require_torch_multi_gpu
def _lowerCAmelCase ( self: Dict) ->str:
'''simple docstring'''
a_ = f"""--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
a_ = self.get_auto_remove_tmp_dir()
a_ = f"""--output_dir {output_dir}""".split()
a_ = ["torchrun"] + distributed_args + args
execute_subprocess_async(a , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
a_ = HfArgumentParser((TrainingArguments,))
a_ = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
F'distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
a_ = DummyDataset(dataset_length)
def __UpperCAmelCase (lowercase__ ) -> Dict:
'''simple docstring'''
a_ = list(range(len(lowercase__ ) ) )
a_ = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"Predictions and/or labels do not match expected results:\n - predictions: "
F"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" )
return {"success": success}
a_ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
a_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
a_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
a_ = 2
a_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
a_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
a_ = None
| 685 |
'''simple docstring'''
import re
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
a_ = re.compile(
r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
return bool(re.search(lowercase__ ,lowercase__ ) )
if __name__ == "__main__":
a_ = '0094702343221'
print(is_sri_lankan_phone_number(phone))
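# More illustrative inputs: the pattern accepts a 0 / 94 / +94 / 0094 prefix,
# a mobile code 7x (x in 0-2 or 4-8), an optional "-" or " " separator, then
# exactly seven digits.
for candidate in ("+94773283048", "075 3283048", "0094112343221"):
    print(candidate, is_sri_lankan_phone_number(candidate))
# -> True, True, False (0094112343221 lacks the leading 7 of a mobile code)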
| 685 | 1 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {'tokenizer_file': 'tokenizer.json'}
a_ = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =['''input_ids''', '''attention_mask''']
_UpperCAmelCase =None
def __init__( self: List[Any] , a: Dict=None , a: Tuple=None , a: Any=None , a: Dict="<unk>" , a: str="<s>" , a: Any="</s>" , a: List[Any]="<pad>" , a: List[str]=False , a: List[str]=False , **a: Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(
a , a , tokenizer_file=a , unk_token=a , bos_token=a , eos_token=a , pad_token=a , add_prefix_space=a , clean_up_tokenization_spaces=a , **a , )
a_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("add_prefix_space" , a) != add_prefix_space:
a_ = getattr(a , pre_tok_state.pop("type"))
a_ = add_prefix_space
a_ = pre_tok_class(**a)
a_ = add_prefix_space
def _lowerCAmelCase ( self: int , *a: Any , **a: int) ->BatchEncoding:
'''simple docstring'''
a_ = kwargs.get("is_split_into_words" , a)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
" pretokenized inputs.")
return super()._batch_encode_plus(*a , **a)
def _lowerCAmelCase ( self: Optional[int] , *a: Any , **a: Optional[int]) ->BatchEncoding:
'''simple docstring'''
a_ = kwargs.get("is_split_into_words" , a)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
" pretokenized inputs.")
return super()._encode_plus(*a , **a)
def _lowerCAmelCase ( self: List[Any] , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = self._tokenizer.model.save(a , name=a)
return tuple(a)
def _lowerCAmelCase ( self: str , a: "Conversation") ->List[int]:
'''simple docstring'''
a_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
a_ = input_ids[-self.model_max_length :]
return input_ids
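# Usage sketch: this class corresponds to transformers' BloomTokenizerFast.
# Loading the published tokenizer (network access needed on first run)
# exercises the add_prefix_space handling configured in __init__ above.
from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
print(tok("Hello world")["input_ids"])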
| 685 |
'''simple docstring'''
import argparse
import os
import re
a_ = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
a_ = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
a_ = re.compile(r'\s*\(\s*"(\S[^"]+)"')
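# For example, a mapping block containing
# ("roberta", "RobertaConfig"),
# ("albert", "AlbertConfig"),
# is keyed on "roberta" / "albert" by the identifier pattern above and
# rewritten with the "albert" entry first.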
def __UpperCAmelCase (lowercase__ ,lowercase__ = False ) -> List[Any]:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.read()
a_ = content.split("\n" )
a_ = []
a_ = 0
while line_idx < len(lowercase__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a_ = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a_ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a_ = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a_ = sorted(lowercase__ ,key=lambda lowercase__ : _re_identifier.search(lowercase__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase__ ,"w" ,encoding="utf-8" ) as f:
f.write("\n".join(lowercase__ ) )
elif "\n".join(lowercase__ ) != content:
return True
def __UpperCAmelCase (lowercase__ = False ) -> Optional[int]:
'''simple docstring'''
a_ = [os.path.join(lowercase__ ,lowercase__ ) for f in os.listdir(lowercase__ ) if f.endswith(".py" )]
a_ = [sort_auto_mapping(lowercase__ ,overwrite=lowercase__ ) for fname in fnames]
if not overwrite and any(lowercase__ ):
a_ = [f for f, d in zip(lowercase__ ,lowercase__ ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {', '.join(lowercase__ )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
a_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 685 | 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
a_ = 'http://www.mocksite.com/file1.txt'
a_ = '"text": ["foo", "foo"]'
a_ = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class SCREAMING_SNAKE_CASE__ :
_UpperCAmelCase =200
_UpperCAmelCase ={'''Content-Length''': '''100'''}
_UpperCAmelCase ={}
def _lowerCAmelCase ( self: Any , **a: Tuple) ->Any:
'''simple docstring'''
return [bytes(a , "utf-8")]
def __UpperCAmelCase (*lowercase__ ,**lowercase__ ) -> Tuple:
'''simple docstring'''
return MockResponse()
@pytest.mark.parametrize("urls_type" ,[str, list, dict] )
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> List[str]:
'''simple docstring'''
import requests
monkeypatch.setattr(lowercase__ ,"request" ,lowercase__ )
a_ = URL
if issubclass(lowercase__ ,lowercase__ ):
a_ = url
elif issubclass(lowercase__ ,lowercase__ ):
a_ = [url]
elif issubclass(lowercase__ ,lowercase__ ):
a_ = {"train": url}
a_ = "dummy"
a_ = "downloads"
a_ = tmp_path
a_ = DownloadConfig(
cache_dir=os.path.join(lowercase__ ,lowercase__ ) ,use_etag=lowercase__ ,)
a_ = DownloadManager(dataset_name=lowercase__ ,download_config=lowercase__ )
a_ = dl_manager.download(lowercase__ )
a_ = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowercase__ ,lowercase__ ):
a_ = [downloaded_paths]
a_ = [urls]
elif isinstance(lowercase__ ,lowercase__ ):
assert "train" in downloaded_paths.keys()
a_ = downloaded_paths.values()
a_ = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowercase__ ,lowercase__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
a_ = Path(lowercase__ )
a_ = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
a_ = downloaded_path.read_text()
assert content == CONTENT
a_ = downloaded_path.with_suffix(".json" )
assert metadata_downloaded_path.exists()
a_ = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" ,[str, list, dict] )
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
a_ = str(lowercase__ )
if issubclass(lowercase__ ,lowercase__ ):
a_ = filename
elif issubclass(lowercase__ ,lowercase__ ):
a_ = [filename]
elif issubclass(lowercase__ ,lowercase__ ):
a_ = {"train": filename}
a_ = "dummy"
a_ = xz_file.parent
a_ = "extracted"
a_ = DownloadConfig(
cache_dir=lowercase__ ,use_etag=lowercase__ ,)
a_ = DownloadManager(dataset_name=lowercase__ ,download_config=lowercase__ )
a_ = dl_manager.extract(lowercase__ )
a_ = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowercase__ ,lowercase__ ):
a_ = [extracted_paths]
a_ = [paths]
elif isinstance(lowercase__ ,lowercase__ ):
assert "train" in extracted_paths.keys()
a_ = extracted_paths.values()
a_ = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowercase__ ,lowercase__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
a_ = Path(lowercase__ )
a_ = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowercase__ ,etag=lowercase__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
a_ = extracted_path.read_text()
a_ = text_file.read_text()
assert extracted_file_content == expected_file_content
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> List[Any]:
'''simple docstring'''
assert path.endswith(".jsonl" )
for num_items, line in enumerate(lowercase__ ,start=1 ):
a_ = json.loads(line.decode("utf-8" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl" ,["tar_jsonl_path", "zip_jsonl_path"] )
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> int:
'''simple docstring'''
a_ = request.getfixturevalue(lowercase__ )
a_ = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowercase__ ) ,start=1 ):
_test_jsonl(lowercase__ ,lowercase__ )
assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl" ,["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
a_ = request.getfixturevalue(lowercase__ )
a_ = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowercase__ ) ,start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowercase__ ) ,start=1 ):
_test_jsonl(lowercase__ ,lowercase__ )
assert num_tar == 1
assert num_jsonl == 2
def __UpperCAmelCase (lowercase__ ) -> Dict:
'''simple docstring'''
a_ = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowercase__ ) ,start=1 ):
assert os.path.basename(lowercase__ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 685 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
_UpperCAmelCase ='''pixel_values'''
_UpperCAmelCase =False
_UpperCAmelCase =TimmBackboneConfig
def __init__( self: Union[str, Any] , a: Union[str, Any] , **a: Tuple) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , "timm")
super().__init__(a)
a_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
if hasattr(a , "out_features") and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
a_ = getattr(a , "use_pretrained_backbone" , a)
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
# We just take the final layer by default. This matches the default for the transformers models.
a_ = config.out_indices if getattr(a , "out_indices" , a) is not None else (-1,)
a_ = timm.create_model(
config.backbone , pretrained=a , features_only=config.features_only , in_chans=config.num_channels , out_indices=a , **a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
a_ = self._backbone.return_layers
        a_ = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(a)
@classmethod
def _lowerCAmelCase ( cls: Tuple , a: Optional[Any] , *a: Optional[Any] , **a: str) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"])
from ...models.timm_backbone import TimmBackboneConfig
a_ = kwargs.pop("config" , TimmBackboneConfig())
a_ = kwargs.pop("use_timm_backbone" , a)
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones")
a_ = kwargs.pop("num_channels" , config.num_channels)
a_ = kwargs.pop("features_only" , config.features_only)
a_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone)
a_ = kwargs.pop("out_indices" , config.out_indices)
a_ = TimmBackboneConfig(
backbone=a , num_channels=a , features_only=a , use_pretrained_backbone=a , out_indices=a , )
return super()._from_config(a , **a)
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Tuple , a: List[Any] , a: Any=None , a: Dict=None , a: Optional[int]=None , **a: int) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment")
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
a_ = self._all_layers
a_ = self._backbone(a , **a)
a_ = self._return_layers
a_ = tuple(hidden_states[i] for i in self.out_indices)
else:
a_ = self._backbone(a , **a)
a_ = None
a_ = tuple(a)
a_ = tuple(a) if hidden_states is not None else None
if not return_dict:
a_ = (feature_maps,)
if output_hidden_states:
a_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=a , hidden_states=a , attentions=a)
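# Minimal usage sketch for the backbone class above (assumptions: `torch` and
# `timm` are installed, and "resnet18" is an illustrative timm model name, not
# one taken from this file):
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#   backbone = SCREAMING_SNAKE_CASE__(config)
#   feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps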
| 685 | 1 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
a_ = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
a_ = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
a_ = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = None ,lowercase__ = False ,) -> Optional[int]:
'''simple docstring'''
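    # Worked example for a single class: if the prediction covers 4 pixels of
    # the class and the ground truth covers 5, with 3 pixels overlapping, then
    # intersect = 3 and union = 4 + 5 - 3 = 6, giving an IoU of 0.5.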
if label_map is not None:
for old_id, new_id in label_map.items():
a_ = new_id
# turn into Numpy arrays
a_ = np.array(lowercase__ )
a_ = np.array(lowercase__ )
if reduce_labels:
a_ = 255
a_ = label - 1
a_ = 255
a_ = label != ignore_index
a_ = np.not_equal(lowercase__ ,lowercase__ )
a_ = pred_label[mask]
a_ = np.array(lowercase__ )[mask]
a_ = pred_label[pred_label == label]
a_ = np.histogram(lowercase__ ,bins=lowercase__ ,range=(0, num_labels - 1) )[0]
a_ = np.histogram(lowercase__ ,bins=lowercase__ ,range=(0, num_labels - 1) )[0]
a_ = np.histogram(lowercase__ ,bins=lowercase__ ,range=(0, num_labels - 1) )[0]
a_ = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = None ,lowercase__ = False ,) -> Optional[Any]:
'''simple docstring'''
a_ = np.zeros((num_labels,) ,dtype=np.floataa )
a_ = np.zeros((num_labels,) ,dtype=np.floataa )
a_ = np.zeros((num_labels,) ,dtype=np.floataa )
a_ = np.zeros((num_labels,) ,dtype=np.floataa )
for result, gt_seg_map in zip(lowercase__ ,lowercase__ ):
a_ , a_ , a_ , a_ = intersect_and_union(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = None ,lowercase__ = None ,lowercase__ = False ,) -> Optional[int]:
'''simple docstring'''
a_ , a_ , a_ , a_ = total_intersect_and_union(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
# compute metrics
a_ = {}
a_ = total_area_intersect.sum() / total_area_label.sum()
a_ = total_area_intersect / total_area_union
a_ = total_area_intersect / total_area_label
a_ = np.nanmean(lowercase__ )
a_ = np.nanmean(lowercase__ )
a_ = all_acc
a_ = iou
a_ = acc
if nan_to_num is not None:
a_ = {metric: np.nan_to_num(lowercase__ ,nan=lowercase__ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def _lowerCAmelCase ( self: Dict) ->str:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
"references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
}) , reference_urls=[
"https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
] , )
def _lowerCAmelCase ( self: Dict , a: Optional[int] , a: Union[str, Any] , a: int , a: bool , a: Optional[int] = None , a: Optional[Dict[int, int]] = None , a: bool = False , ) ->str:
'''simple docstring'''
a_ = mean_iou(
results=a , gt_seg_maps=a , num_labels=a , ignore_index=a , nan_to_num=a , label_map=a , reduce_labels=a , )
return iou_result
| 685 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):  # placeholder for the OverFlowError raised by the queues below
    pass
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):  # placeholder for the UnderFlowError raised by the queues below
    pass
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Optional[Any]) ->List[str]:
'''simple docstring'''
a_ = [
[],
[],
[],
]
def _lowerCAmelCase ( self: Dict , a: int , a: int) ->None:
'''simple docstring'''
try:
if len(self.queues[priority]) >= 1_00:
                raise OverFlowError("Maximum queue size is 100")
self.queues[priority].append(a)
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2")
def _lowerCAmelCase ( self: Union[str, Any]) ->int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0)
raise UnderFlowError("All queues are empty")
def __str__( self: Dict) ->str:
'''simple docstring'''
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Any) ->List[str]:
'''simple docstring'''
a_ = []
def _lowerCAmelCase ( self: int , a: int) ->None:
'''simple docstring'''
if len(self.queue) == 1_00:
raise OverFlowError("Maximum queue size is 100")
self.queue.append(a)
def _lowerCAmelCase ( self: List[str]) ->int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError("The queue is empty")
else:
a_ = min(self.queue)
self.queue.remove(a)
return data
def __str__( self: Optional[int]) ->str:
'''simple docstring'''
return str(self.queue)
def __UpperCAmelCase () -> Union[str, Any]:
'''simple docstring'''
a_ = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __UpperCAmelCase () -> List[Any]:
'''simple docstring'''
a_ = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =['''pixel_values''']
def __init__( self: Optional[int] , a: bool = True , a: Dict[str, int] = None , a: PILImageResampling = PIL.Image.BICUBIC , a: bool = True , a: Dict[str, int] = None , a: Union[int, float] = 1 / 2_55 , a: bool = True , a: bool = True , a: Optional[Union[float, List[float]]] = None , a: Optional[Union[float, List[float]]] = None , **a: List[Any] , ) ->None:
'''simple docstring'''
super().__init__(**a)
a_ = size if size is not None else {"height": 2_56, "width": 2_56}
a_ = get_size_dict(a)
a_ = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
a_ = get_size_dict(a , param_name="crop_size")
a_ = do_resize
a_ = size
a_ = resample
a_ = do_center_crop
a_ = crop_size
a_ = do_rescale
a_ = rescale_factor
a_ = do_normalize
a_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCAmelCase ( self: Optional[Any] , a: np.ndarray , a: Dict[str, int] , a: PILImageResampling = PIL.Image.BICUBIC , a: Optional[Union[str, ChannelDimension]] = None , **a: Optional[int] , ) ->np.ndarray:
'''simple docstring'''
a_ = get_size_dict(a)
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
return resize(
a , size=(size["height"], size["width"]) , resample=a , data_format=a , **a)
def _lowerCAmelCase ( self: Dict , a: np.ndarray , a: Dict[str, int] , a: Optional[Union[str, ChannelDimension]] = None , **a: int , ) ->np.ndarray:
'''simple docstring'''
a_ = get_size_dict(a)
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a)
def _lowerCAmelCase ( self: str , a: np.ndarray , a: Union[int, float] , a: Optional[Union[str, ChannelDimension]] = None , **a: str , ) ->Optional[Any]:
'''simple docstring'''
return rescale(a , scale=a , data_format=a , **a)
def _lowerCAmelCase ( self: Optional[Any] , a: np.ndarray , a: Union[float, List[float]] , a: Union[float, List[float]] , a: Optional[Union[str, ChannelDimension]] = None , **a: int , ) ->np.ndarray:
'''simple docstring'''
return normalize(a , mean=a , std=a , data_format=a , **a)
def _lowerCAmelCase ( self: int , a: ImageInput , a: bool = None , a: Dict[str, int] = None , a: List[Any]=None , a: bool = None , a: Dict[str, int] = None , a: bool = None , a: float = None , a: bool = None , a: Optional[Union[float, List[float]]] = None , a: Optional[Union[float, List[float]]] = None , a: Optional[Union[str, TensorType]] = None , a: ChannelDimension = ChannelDimension.FIRST , **a: Any , ) ->PIL.Image.Image:
'''simple docstring'''
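        # Processing order: resize -> center-crop -> rescale -> normalize, each
        # step applied only when its corresponding flag is enabled.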
a_ = do_resize if do_resize is not None else self.do_resize
a_ = resample if resample is not None else self.resample
a_ = do_center_crop if do_center_crop is not None else self.do_center_crop
a_ = do_rescale if do_rescale is not None else self.do_rescale
a_ = rescale_factor if rescale_factor is not None else self.rescale_factor
a_ = do_normalize if do_normalize is not None else self.do_normalize
a_ = image_mean if image_mean is not None else self.image_mean
a_ = image_std if image_std is not None else self.image_std
a_ = size if size is not None else self.size
a_ = get_size_dict(a)
a_ = crop_size if crop_size is not None else self.crop_size
a_ = get_size_dict(a , param_name="crop_size")
a_ = make_list_of_images(a)
if not valid_images(a):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
a_ = [to_numpy_array(a) for image in images]
if do_resize:
a_ = [self.resize(image=a , size=a , resample=a) for image in images]
if do_center_crop:
a_ = [self.center_crop(image=a , size=a) for image in images]
if do_rescale:
a_ = [self.rescale(image=a , scale=a) for image in images]
if do_normalize:
a_ = [self.normalize(image=a , mean=a , std=a) for image in images]
a_ = [to_channel_dimension_format(a , a) for image in images]
a_ = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a)
| 685 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase () -> Optional[Any]:
'''simple docstring'''
a_ = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
a_ = Dataset.from_dict(lowercase__ )
return dataset
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
a_ = get_dataset()
a_ = make_duplicate_clusters(a , 0.85)
self.assertEqual(len(duplicate_clusters[0]) , 2)
def _lowerCAmelCase ( self: Any) ->Dict:
'''simple docstring'''
a_ = get_dataset()
a_ , a_ = deduplicate_dataset(a)
self.assertEqual(len(a) , 2)
print(a)
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2)
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , a)
| 685 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''informer'''
_UpperCAmelCase ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self: Tuple , a: Optional[int] = None , a: Optional[int] = None , a: str = "student_t" , a: str = "nll" , a: int = 1 , a: List[int] = None , a: Optional[Union[str, bool]] = "mean" , a: int = 0 , a: int = 0 , a: int = 0 , a: int = 0 , a: Optional[List[int]] = None , a: Optional[List[int]] = None , a: int = 64 , a: int = 32 , a: int = 32 , a: int = 2 , a: int = 2 , a: int = 2 , a: int = 2 , a: bool = True , a: str = "gelu" , a: float = 0.05 , a: float = 0.1 , a: float = 0.1 , a: float = 0.1 , a: float = 0.1 , a: int = 1_00 , a: float = 0.02 , a: Tuple=True , a: str = "prob" , a: int = 5 , a: bool = True , **a: int , ) ->str:
'''simple docstring'''
a_ = prediction_length
a_ = context_length or prediction_length
a_ = distribution_output
a_ = loss
a_ = input_size
a_ = num_time_features
a_ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
a_ = scaling
a_ = num_dynamic_real_features
a_ = num_static_real_features
a_ = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(a) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`")
a_ = cardinality
else:
a_ = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(a) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`")
a_ = embedding_dimension
else:
a_ = [min(50 , (cat + 1) // 2) for cat in self.cardinality]
a_ = num_parallel_samples
# Transformer architecture configuration
a_ = input_size * len(self.lags_sequence) + self._number_of_features
a_ = d_model
a_ = encoder_attention_heads
a_ = decoder_attention_heads
a_ = encoder_ffn_dim
a_ = decoder_ffn_dim
a_ = encoder_layers
a_ = decoder_layers
a_ = dropout
a_ = attention_dropout
a_ = activation_dropout
a_ = encoder_layerdrop
a_ = decoder_layerdrop
a_ = activation_function
a_ = init_std
a_ = use_cache
# Informer
a_ = attention_type
a_ = sampling_factor
a_ = distil
super().__init__(is_encoder_decoder=a , **a)
@property
def _lowerCAmelCase ( self: str) ->int:
'''simple docstring'''
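        # Worked example under the constructor defaults: input_size=1, the
        # default lags_sequence [1..7], no time/static/dynamic features and an
        # embedding dimension summing to 0, so this property returns
        # 0 + 0 + 0 + 0 + 1 * 2 = 2 and the model input dimension computed in
        # __init__ is 1 * 7 + 2 = 9.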
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 685 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , *a: str , **a: Tuple) ->None:
'''simple docstring'''
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , a , )
super().__init__(*a , **a)
| 685 | 1 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 685 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a_ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Any , a: Path , a: Union[str, None] = None , a: Union[List[str], None] = None , a: Union[str, List[str], None] = None , a: bool = True , ) ->Optional[Any]:
'''simple docstring'''
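        # Collects the files in `directory`, keeps those whose names contain
        # `identifier` (or lack every `n_identifier`), then doctests each one
        # either as an imported module or as a text file.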
a_ = [file for file in os.listdir(a) if os.path.isfile(os.path.join(a , a))]
if identifier is not None:
a_ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(a , a):
for n_ in n_identifier:
a_ = [file for file in files if n_ not in file]
else:
a_ = [file for file in files if n_identifier not in file]
a_ = ignore_files or []
ignore_files.append("__init__.py")
a_ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , a)
if only_modules:
a_ = file.split(".")[0]
try:
a_ = getattr(a , a)
a_ = doctest.DocTestSuite(a)
a_ = unittest.TextTestRunner().run(a)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""")
else:
a_ = doctest.testfile(str(".." / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "modeling"
a_ = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(a , identifier=a , ignore_files=a)
def _lowerCAmelCase ( self: int) ->Dict:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "tokenization"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "configuration"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: Union[str, Any]) ->Any:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = ["configuration", "modeling", "tokenization"]
self.analyze_directory(a , n_identifier=a)
def _lowerCAmelCase ( self: Optional[int]) ->Tuple:
'''simple docstring'''
a_ = Path("docs/source")
a_ = ["favicon.ico"]
self.analyze_directory(a , ignore_files=a , only_modules=a)
| 685 | 1 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ ) -> Optional[Any]:
'''simple docstring'''
a_ = len(lowercase__ )
a_ = sum(lowercase__ )
a_ = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 ,n + 1 ):
a_ = True
for i in range(1 ,s + 1 ):
a_ = False
for i in range(1 ,n + 1 ):
for j in range(1 ,s + 1 ):
            a_ = dp[i - 1][j]
if arr[i - 1] <= j:
a_ = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) ,-1 ,-1 ):
if dp[n][j] is True:
a_ = s - 2 * j
break
return diff
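# Worked example of the intended algorithm: for [1, 6, 11, 5] (total 23), the
# best achievable half-sum is 11 via the subset {11} (the complement {1, 6, 5}
# sums to 12), so the minimum partition difference is 23 - 2 * 11 = 1.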
| 685 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ = 100 ) -> int:
'''simple docstring'''
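    # Example from Project Euler #6: for n = 10 the square of the sum is
    # 55**2 = 3025 and the sum of the squares is 385, a difference of 2640.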
a_ = n * (n + 1) * (2 * n + 1) / 6
a_ = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 1 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __UpperCAmelCase (lowercase__ ) -> List[Tuple[int, ...]]:
'''simple docstring'''
a_ = []
if isinstance(lowercase__ ,lowercase__ ):
for v in tree.values():
shapes.extend(_fetch_dims(lowercase__ ) )
elif isinstance(lowercase__ ,(list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowercase__ ) )
elif isinstance(lowercase__ ,torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Tuple[int, ...]:
'''simple docstring'''
a_ = []
for d in reversed(lowercase__ ):
idx.append(flat_idx % d )
a_ = flat_idx // d
return tuple(reversed(lowercase__ ) )
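# Example: a flat index of 5 over batch dims (2, 3) unravels to (1, 2), i.e.
# standard row-major (C-order) indexing.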
@torch.jit.ignore
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = None ,lowercase__ = None ,) -> List[Tuple[slice, ...]]:
'''simple docstring'''
def reduce_edge_list(lowercase__ ) -> None:
a_ = True
for i in range(len(lowercase__ ) ):
a_ = -1 * (i + 1)
l[reversed_idx] &= tally
a_ = l[reversed_idx]
if start_edges is None:
a_ = [s == 0 for s in start]
reduce_edge_list(lowercase__ )
if end_edges is None:
a_ = [e == (d - 1) for e, d in zip(lowercase__ ,lowercase__ )]
reduce_edge_list(lowercase__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowercase__ ) == 0:
return [()]
elif len(lowercase__ ) == 1:
return [(slice(start[0] ,end[0] + 1 ),)]
a_ = []
a_ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowercase__ ,lowercase__ ):
if s == e:
path_list.append(slice(lowercase__ ,s + 1 ) )
else:
break
a_ = tuple(lowercase__ )
a_ = len(lowercase__ )
# start == end, and we're done
if divergence_idx == len(lowercase__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
a_ = start[divergence_idx]
return tuple(
path + (slice(lowercase__ ,sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] ,[d - 1 for d in dims[divergence_idx + 1 :]] ,dims[divergence_idx + 1 :] ,start_edges=start_edges[divergence_idx + 1 :] ,end_edges=[True for _ in end_edges[divergence_idx + 1 :]] ,) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
a_ = end[divergence_idx]
return tuple(
path + (slice(lowercase__ ,edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] ,end[divergence_idx + 1 :] ,dims[divergence_idx + 1 :] ,start_edges=[True for _ in start_edges[divergence_idx + 1 :]] ,end_edges=end_edges[divergence_idx + 1 :] ,) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] ,end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
a_ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 ,end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> torch.Tensor:
'''simple docstring'''
a_ = t.shape[:no_batch_dims]
a_ = list(_flat_idx_to_idx(lowercase__ ,lowercase__ ) )
# _get_minimal_slice_set is inclusive
a_ = list(_flat_idx_to_idx(flat_end - 1 ,lowercase__ ) )
# Get an ordered list of slices to perform
a_ = _get_minimal_slice_set(
lowercase__ ,lowercase__ ,lowercase__ ,)
a_ = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = False ,lowercase__ = None ,lowercase__ = False ,) -> Any:
'''simple docstring'''
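    # Flattens the shared batch dimensions of all inputs, applies `layer` to
    # successive chunks of at most `chunk_size` rows, and reassembles the chunk
    # outputs into tensors with the original batch shape.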
if not (len(lowercase__ ) > 0):
raise ValueError("Must provide at least one input" )
a_ = [shape[:no_batch_dims] for shape in _fetch_dims(lowercase__ )]
a_ = tuple([max(lowercase__ ) for s in zip(*lowercase__ )] )
def _prep_inputs(lowercase__ ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
a_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
a_ = t.reshape(-1 ,*t.shape[no_batch_dims:] )
else:
a_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
a_ = tensor_tree_map(_prep_inputs ,lowercase__ )
a_ = None
if _out is not None:
a_ = tensor_tree_map(lambda lowercase__ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) ,_out )
a_ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
a_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowercase__ ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
a_ = 0
a_ = prepped_outputs
for _ in range(lowercase__ ):
# Chunk the input
if not low_mem:
a_ = _select_chunk
else:
a_ = partial(
_chunk_slice ,flat_start=lowercase__ ,flat_end=min(lowercase__ ,i + chunk_size ) ,no_batch_dims=len(lowercase__ ) ,)
a_ = tensor_tree_map(lowercase__ ,lowercase__ )
# Run the layer on the chunk
a_ = layer(**lowercase__ )
# Allocate space for the output
if out is None:
a_ = tensor_tree_map(lambda lowercase__ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) ,lowercase__ )
# Put the chunk in its pre-allocated space
if isinstance(lowercase__ ,lowercase__ ):
def assign(lowercase__ ,lowercase__ ) -> None:
for k, v in da.items():
if isinstance(lowercase__ ,lowercase__ ):
assign(lowercase__ ,da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
a_ = da[k]
assign(lowercase__ ,lowercase__ )
elif isinstance(lowercase__ ,lowercase__ ):
for xa, xa in zip(lowercase__ ,lowercase__ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
a_ = xa
elif isinstance(lowercase__ ,torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
a_ = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
a_ = tensor_tree_map(lambda lowercase__ : t.view(orig_batch_dims + t.shape[1:] ) ,lowercase__ )
return out
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Optional[int] , a: int = 5_12 , ) ->List[str]:
'''simple docstring'''
a_ = max_chunk_size
a_ = None
a_ = None
def _lowerCAmelCase ( self: List[Any] , a: Callable , a: tuple , a: int) ->int:
'''simple docstring'''
logging.info("Tuning chunk size...")
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
a_ = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
a_ = [c for c in candidates if c > min_chunk_size]
a_ = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(a: int) -> bool:
try:
with torch.no_grad():
fn(*a , chunk_size=a)
return True
except RuntimeError:
return False
a_ = 0
a_ = len(a) - 1
while i > min_viable_chunk_size_index:
a_ = test_chunk_size(candidates[i])
if not viable:
a_ = (min_viable_chunk_size_index + i) // 2
else:
a_ = i
a_ = (i + len(a) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _lowerCAmelCase ( self: Union[str, Any] , a: Iterable , a: Iterable) ->bool:
'''simple docstring'''
a_ = True
for aa, aa in zip(a , a):
assert type(a) == type(a)
if isinstance(a , (list, tuple)):
consistent &= self._compare_arg_caches(a , a)
elif isinstance(a , a):
a_ = [v for _, v in sorted(aa.items() , key=lambda a: x[0])]
a_ = [v for _, v in sorted(aa.items() , key=lambda a: x[0])]
consistent &= self._compare_arg_caches(a , a)
else:
consistent &= aa == aa
return consistent
def _lowerCAmelCase ( self: str , a: Callable , a: tuple , a: int , ) ->int:
'''simple docstring'''
a_ = True
a_ = tree_map(lambda a: a.shape if isinstance(a , torch.Tensor) else a , a , a)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(a)
a_ = self._compare_arg_caches(self.cached_arg_data , a)
else:
# Otherwise, we can reuse the precomputed value
a_ = False
if not consistent:
a_ = self._determine_favorable_chunk_size(
a , a , a , )
a_ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 685 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =(PNDMScheduler,)
_UpperCAmelCase =(('''num_inference_steps''', 50),)
def _lowerCAmelCase ( self: int , **a: Optional[int]) ->Any:
'''simple docstring'''
a_ = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a)
return config
def _lowerCAmelCase ( self: Any , a: Tuple=0 , **a: Any) ->Any:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
new_scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: str) ->Any:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Union[str, Any] , a: str=0 , **a: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
# copy over dummy past residuals
new_scheduler.set_timesteps(a)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: Dict , **a: int) ->Any:
'''simple docstring'''
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(a)
for i, t in enumerate(scheduler.prk_timesteps):
a_ = model(a , a)
a_ = scheduler.step_prk(a , a , a).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a_ = model(a , a)
a_ = scheduler.step_plms(a , a , a).prev_sample
return sample
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(a , "set_timesteps"):
scheduler.set_timesteps(a)
elif num_inference_steps is not None and not hasattr(a , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , 0 , a , **a).prev_sample
a_ = scheduler.step_prk(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ = scheduler.step_plms(a , 0 , a , **a).prev_sample
a_ = scheduler.step_plms(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(steps_offset=1)
a_ = scheduler_class(**a)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1]) , )
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=a , beta_end=a)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=a)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=a)
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = 27
for scheduler_class in self.scheduler_classes:
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a_ = scheduler.step_prk(a , a , a).prev_sample
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
with self.assertRaises(a):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.full_loop()
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = self.full_loop(prediction_type="v_prediction")
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 | 1 |
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
a_ = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def __UpperCAmelCase (lowercase__ ) -> Tuple:
'''simple docstring'''
a_ = list(s_dict.keys() )
for key in keys:
a_ = r".*/layers_(\d+)"
a_ = key
if re.match(lowercase__ ,lowercase__ ):
a_ = re.sub(r"layers_(\d+)" ,r"block/\1/layer" ,lowercase__ )
a_ = r"(encoder|decoder)\/"
if re.match(lowercase__ ,lowercase__ ):
a_ = re.match(lowercase__ ,lowercase__ ).groups()
if groups[0] == "encoder":
a_ = re.sub(r"/mlp/" ,r"/1/mlp/" ,lowercase__ )
a_ = re.sub(r"/pre_mlp_layer_norm/" ,r"/1/layer_norm/" ,lowercase__ )
elif groups[0] == "decoder":
a_ = re.sub(r"/mlp/" ,r"/2/mlp/" ,lowercase__ )
a_ = re.sub(r"/pre_mlp_layer_norm/" ,r"/2/layer_norm/" ,lowercase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
a_ = new_key.replace(lowercase__ ,lowercase__ )
print(F"""{key} -> {new_key}""" )
a_ = s_dict.pop(lowercase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
a_ = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
a_ = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
a_ = s_dict[key].shape[0]
a_ = s_dict[key]
for idx in range(lowercase__ ):
a_ = expert_weihts[idx]
print(F"""{key} -> {key.replace('expert/' ,'nested fstring' )}""" )
s_dict.pop(lowercase__ )
return s_dict
a_ = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Optional[int]:
'''simple docstring'''
import regex as re
with open(lowercase__ ,"r" ) as f:
a_ = f.read()
a_ = re.findall(r"(.*) = ([0-9.]*)" ,lowercase__ )
a_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
a_ = float(lowercase__ ) if "." in value else int(lowercase__ )
a_ = re.findall(r"(.*activations) = \(\'(.*)\',\)" ,lowercase__ )[0]
a_ = str(activation[1] )
a_ = num_experts
a_ = SwitchTransformersConfig(**lowercase__ )
return config
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__=None ,lowercase__="./" ,lowercase__=8 ) -> str:
'''simple docstring'''
print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
a_ = checkpoints.load_tax_checkpoint(lowercase__ )
if gin_file is not None:
a_ = convert_gin_to_config(lowercase__ ,lowercase__ )
else:
a_ = SwitchTransformersConfig.from_pretrained(lowercase__ )
a_ = SwitchTransformersForConditionalGeneration(lowercase__ )
a_ = flax_params["target"]
a_ = flatten_dict(lowercase__ ,sep="/" )
a_ = rename_keys(lowercase__ )
a_ = unflatten_dict(lowercase__ ,sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowercase__ ,lowercase__ )
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
        help=(
            'Path to the T5X SwitchTransformers checkpoint to convert. If a `config_name` is not'
            ' provided, a `gin_file` has to be provided.'
        ),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
        help='Path to the gin config file. If not provided, a `config_name` has to be passed.',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
a_ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
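    # Example invocation of this script (the script name and all paths below
    # are illustrative placeholders):
    #   python convert_script.py --switch_t5x_checkpoint_path /tmp/t5x_checkpoint \
    #       --gin_file /tmp/model.gin --pytorch_dump_folder_path /tmp/switch_pt \
    #       --num_experts 8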
| 685 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Optional[int]) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "bird"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
a_ = pipe.prepare_image_inputs([canny_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "Chef in the kitchen"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
a_ = pipe.prepare_image_inputs([pose_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 1 |
'''simple docstring'''
from collections import defaultdict
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Dict , a: Optional[Any] , a: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
a_ = [
[-1 for i in range(total + 1)] for j in range(2 ** len(a))
]
a_ = defaultdict(a) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
a_ = (1 << len(a)) - 1
def _lowerCAmelCase ( self: Optional[int] , a: Any , a: Tuple) ->Any:
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
        # Number of ways when we don't assign this task in the arrangement
a_ = self.count_ways_until(a , task_no + 1)
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1)
# save the value.
a_ = total_ways_util
return self.dp[mask][task_no]
def _lowerCAmelCase ( self: str , a: List[Any]) ->List[Any]:
'''simple docstring'''
for i in range(len(a)):
for j in task_performed[i]:
self.task[j].append(a)
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1)
if __name__ == "__main__":
a_ = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
a_ = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
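    # A hedged worked example of the bitmask DP above: with persons' allowed tasks
    # [[1, 3, 4], [1, 2, 5], [3, 4]] and 5 total tasks, there are 5 valid complete
    # assignments when person 2 takes task 3 and 5 more when they take task 4, so
    # this call should print 10.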
| 685 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ = 1000 ) -> int:
'''simple docstring'''
return sum(e for e in range(3 ,lowercase__ ) if e % 3 == 0 or e % 5 == 0 )
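# A hedged cross-check (not part of the original): by inclusion-exclusion over
# arithmetic series, with m = (limit - 1) // k and S(k) = k * m * (m + 1) // 2,
# the sum equals S(3) + S(5) - S(15); for the default limit of 1000 both
# approaches give 233168.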
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 1 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a_ = '▁'
a_ = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =BertGenerationTokenizer
_UpperCAmelCase =False
_UpperCAmelCase =True
def _lowerCAmelCase ( self: Any) ->Optional[Any]:
'''simple docstring'''
super().setUp()
a_ = BertGenerationTokenizer(a , keep_accents=a)
tokenizer.save_pretrained(self.tmpdirname)
def _lowerCAmelCase ( self: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = "<s>"
a_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a) , a)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a) , a)
def _lowerCAmelCase ( self: Optional[Any]) ->str:
'''simple docstring'''
a_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<unk>")
self.assertEqual(vocab_keys[1] , "<s>")
self.assertEqual(vocab_keys[-1] , "<pad>")
self.assertEqual(len(a) , 10_02)
def _lowerCAmelCase ( self: Optional[Any]) ->int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_00)
def _lowerCAmelCase ( self: List[str]) ->str:
'''simple docstring'''
a_ = BertGenerationTokenizer(a , keep_accents=a)
a_ = tokenizer.tokenize("This is a test")
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a) , [2_85, 46, 10, 1_70, 3_82] , )
a_ = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
a_ = tokenizer.convert_tokens_to_ids(a)
self.assertListEqual(
a , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
a_ = tokenizer.convert_ids_to_tokens(a)
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _lowerCAmelCase ( self: Dict) ->Dict:
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
@slow
def _lowerCAmelCase ( self: List[str]) ->Dict:
'''simple docstring'''
a_ = "Hello World!"
a_ = [1_85_36, 22_60, 1_01]
self.assertListEqual(a , self.big_tokenizer.encode(a))
@slow
def _lowerCAmelCase ( self: Optional[Any]) ->str:
'''simple docstring'''
a_ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
a_ = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(a , self.big_tokenizer.encode(a))
@require_torch
@slow
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
a_ = list(self.big_tokenizer.get_vocab().keys())[:10]
a_ = " ".join(a)
a_ = self.big_tokenizer.encode_plus(a , return_tensors="pt" , return_token_type_ids=a)
a_ = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=a)
a_ = BertGenerationConfig()
a_ = BertGenerationEncoder(a)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a)
model(**a)
@slow
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = {"input_ids": [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 685 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> list:
'''simple docstring'''
a_ = [True] * n
a_ = False
a_ = False
a_ = True
for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
a_ = i * 2
while index < n:
a_ = False
a_ = index + i
a_ = [2]
for i in range(3 ,lowercase__ ,2 ):
if is_prime[i]:
primes.append(lowercase__ )
return primes
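# For example, the sieve above returns [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] for n = 30.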
def __UpperCAmelCase (lowercase__ = 999966663333 ) -> int:
'''simple docstring'''
a_ = math.floor(math.sqrt(lowercase__ ) ) + 100
a_ = prime_sieve(lowercase__ )
a_ = 0
a_ = 0
a_ = primes[prime_index]
while (last_prime**2) <= limit:
a_ = primes[prime_index + 1]
a_ = last_prime**2
a_ = next_prime**2
# Get numbers divisible by lps(current)
a_ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
a_ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
a_ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
a_ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def __UpperCAmelCase (lowercase__ ) -> tuple[np.ndarray, np.ndarray]:
'''simple docstring'''
a_ , a_ = np.shape(lowercase__ )
if rows != columns:
a_ = (
"'table' has to be of square shaped array but got a "
F"""{rows}x{columns} array:\n{table}"""
)
raise ValueError(lowercase__ )
a_ = np.zeros((rows, columns) )
a_ = np.zeros((rows, columns) )
for i in range(lowercase__ ):
for j in range(lowercase__ ):
a_ = sum(lower[i][k] * upper[k][j] for k in range(lowercase__ ) )
if upper[j][j] == 0:
raise ArithmeticError("No LU decomposition exists" )
a_ = (table[i][j] - total) / upper[j][j]
a_ = 1
for j in range(lowercase__ ,lowercase__ ):
a_ = sum(lower[i][k] * upper[k][j] for k in range(lowercase__ ) )
a_ = table[i][j] - total
return lower, upper
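# A small hand-checked example of the Doolittle factorization above: for
# table = [[4., 3.], [6., 3.]] it yields lower = [[1., 0.], [1.5, 1.]] and
# upper = [[4., 3.], [0., -1.5]], so that lower @ upper reproduces the input.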
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
a_ = UniSpeechSatForSequenceClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["projector.weight"]
a_ = downstream_dict["projector.bias"]
a_ = downstream_dict["model.post_net.linear.weight"]
a_ = downstream_dict["model.post_net.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Dict:
'''simple docstring'''
a_ = UniSpeechSatForAudioFrameClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["model.linear.weight"]
a_ = downstream_dict["model.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[Any]:
'''simple docstring'''
a_ = UniSpeechSatForXVector.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["connector.weight"]
a_ = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
a_ = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
a_ = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
a_ = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> List[str]:
'''simple docstring'''
a_ = torch.load(lowercase__ ,map_location="cpu" )
a_ = checkpoint["Downstream"]
a_ = UniSpeechSatConfig.from_pretrained(lowercase__ )
a_ = WavaVecaFeatureExtractor.from_pretrained(
lowercase__ ,return_attention_mask=lowercase__ ,do_normalize=lowercase__ )
a_ = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
a_ = convert_classification(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForAudioFrameClassification" ):
a_ = convert_diarization(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForXVector" ):
a_ = convert_xvector(lowercase__ ,lowercase__ ,lowercase__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
a_ = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
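    # Hypothetical invocation (script and file names below are placeholders, not
    # taken from the original):
    # python convert_s3prl_checkpoint.py \
    #     --base_model_name microsoft/unispeech-sat-base \
    #     --config_path ./config.json \
    #     --checkpoint_path ./s3prl_downstream.ckpt \
    #     --model_dump_path ./converted_model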
| 685 | 1 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase () -> Optional[Any]:
'''simple docstring'''
a_ = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
a_ = Dataset.from_dict(lowercase__ )
return dataset
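# Note: the first two rows above are near-duplicates by construction ("a " repeated
# 20 and 30 times), which the 0.85 MinHash/Jaccard threshold used in the tests
# below should flag as one cluster.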
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[int]:
'''simple docstring'''
a_ = get_dataset()
a_ = make_duplicate_clusters(a , 0.85)
self.assertEqual(len(duplicate_clusters[0]) , 2)
def _lowerCAmelCase ( self: Any) ->Dict:
'''simple docstring'''
a_ = get_dataset()
a_ , a_ = deduplicate_dataset(a)
self.assertEqual(len(a) , 2)
print(a)
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2)
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , a)
| 685 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 685 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
    smartaaa_timesteps,
    superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =42
_UpperCAmelCase =42
_UpperCAmelCase =42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 685 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
a_ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
a_ = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Tuple:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = json.loads(f.read() )
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.readlines()
a_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ = b
a_ = idx
for wd in b:
a_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase =['''input_ids''', '''attention_mask''']
def __init__( self: List[str] , a: Union[str, Any] , a: Optional[int] , a: List[str]="<|endoftext|>" , a: Union[str, Any]="<|endoftext|>" , a: Dict="<|startoftext|>" , a: Dict="<|endoftext|>" , a: Union[str, Any]=False , **a: Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(
unk_token=a , pad_token=a , bos_token=a , eos_token=a , do_clean_text=a , **a , )
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ = do_clean_text
a_ , a_ , a_ , a_ = load_vocab_and_emoji(a , a)
a_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return len(self.raw_vocab)
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any) ->Dict:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def _lowerCAmelCase ( self: int , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a)
def _lowerCAmelCase ( self: Optional[int] , a: Any) ->str:
'''simple docstring'''
a_ = "".join(a).strip()
return out_string
def _lowerCAmelCase ( self: Any , a: "Conversation") ->List[int]:
'''simple docstring'''
a_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
a_ = input_ids[-self.model_max_length :]
return input_ids
def _lowerCAmelCase ( self: int , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = 0
if os.path.isdir(a):
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(a , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ = token_index
writer.write(",".join(a) + "\n")
index += 1
with open(a , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , a)
return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[str] , a: Any , a: Union[str, Any] , a: Any) ->List[Any]:
'''simple docstring'''
a_ = vocab # same as swe
a_ = ids_to_tokens # same as bpe
a_ = emoji
a_ = np.max([len(a) for w in self.vocab.keys()])
a_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Dict) ->Any:
'''simple docstring'''
return len(self.ids_to_tokens)
def _lowerCAmelCase ( self: Union[str, Any] , a: Tuple) ->Any:
'''simple docstring'''
a_ = self.content_repattera.sub("<URL>" , a)
a_ = self.content_repattera.sub("<EMAIL>" , a)
a_ = self.content_repattera.sub("<TEL>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<PRICE>" , a)
a_ = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def _lowerCAmelCase ( self: Any , a: int , a: Optional[int]=False) ->List[str]:
'''simple docstring'''
a_ = text.replace(" " , "<SP>")
a_ = text.replace(" " , "<SP>")
a_ = text.replace("\r\n" , "<BR>")
a_ = text.replace("\n" , "<BR>")
a_ = text.replace("\r" , "<BR>")
a_ = text.replace("\t" , "<TAB>")
a_ = text.replace("—" , "ー")
a_ = text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ = text.replace(a , a)
if clean:
a_ = self.clean_text(a)
def check_simbol(a: Dict):
a_ = x.encode()
if len(a) == 1 and len(a) == 2:
a_ = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC_2_A_1 and c <= 0XC_2_B_F)
or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3)
or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F)
or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2)
):
return True
return False
def checkuae(a: str):
a_ = x.encode()
if len(a) == 1 and len(a) == 3:
a_ = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F:
return True
return False
a_ = 0
a_ = []
while pos < len(a):
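            # Greedy longest-match: candidate substrings are scanned from the widest
            # window (self.maxlen when the text starts with "<", i.e. a possible
            # special token, otherwise 3 characters) down to a single character, and
            # among the matches the smallest token id is adopted.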
a_ = min(len(a) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ = [] # (token_id, token, pos)
for e in range(a , a , -1):
a_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a) > 2:
a_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(a) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ = sorted(a , key=lambda a: x[0])[0]
result.append(a)
a_ = e
else:
a_ = pos + 1
a_ = text[pos:end]
if check_simbol(a):
result.append("<KIGOU>")
elif checkuae(a):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ = end
return result
def _lowerCAmelCase ( self: int , a: List[Any] , a: Any="\n") ->str:
'''simple docstring'''
a_ = []
a_ = []
a_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(a)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(a)
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = "".join(a)
return text
| 685 | 1 |
'''simple docstring'''
a_ = 0 # The first color of the flag.
a_ = 1 # The second color of the flag.
a_ = 2 # The third color of the flag.
a_ = (red, white, blue)
def __UpperCAmelCase (lowercase__ ) -> list:
'''simple docstring'''
if not sequence:
return []
if len(lowercase__ ) == 1:
return list(lowercase__ )
a_ = 0
a_ = len(lowercase__ ) - 1
a_ = 0
while mid <= high:
if sequence[mid] == colors[0]:
a_ , a_ = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
a_ , a_ = sequence[high], sequence[mid]
high -= 1
else:
a_ = F"""The elements inside the sequence must contains only {colors} values"""
raise ValueError(lowercase__ )
return sequence
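# For example, dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) returns
# [0, 0, 1, 1, 2, 2] in a single pass with O(1) extra space.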
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = input('Enter numbers separated by commas:\n').strip()
a_ = [int(item.strip()) for item in user_input.split(',')]
print(F'{dutch_national_flag_sort(unsorted)}')
| 685 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Optional[Any] , a: Any , a: Dict , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , ) ->int:
'''simple docstring'''
a_ = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(a , start_positions=a , end_positions=a)
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Union[str, Any] , a: Any , a: Tuple , a: Union[str, Any] , a: int , a: int , a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str , a: List[str] , a: Dict , a: Tuple , a: Optional[Any] , a: Any , a: Any , a: str , a: str , a: Optional[Any] , ) ->List[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: List[Any] , a: Dict , a: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[str] , a: Tuple , ) ->Dict:
'''simple docstring'''
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) = config_and_inputs
a_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Any , a: List[str] , a: Union[str, Any] , a: int) ->int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: List[Any] , a: Tuple=False) ->List[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModelTester(self)
a_ = ConfigTester(self , config_class=a , emb_dim=37)
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a)
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a)
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = FlaubertModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt"))
a_ = torch.jit.load(os.path.join(a , "traced_model.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
a_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
a_ = model(a)[0]
a_ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4))
| 685 | 1 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
a_ = checkpoint
a_ = {}
a_ = vae_state_dict["encoder.conv_in.weight"]
a_ = vae_state_dict["encoder.conv_in.bias"]
a_ = vae_state_dict["encoder.conv_out.weight"]
a_ = vae_state_dict["encoder.conv_out.bias"]
a_ = vae_state_dict["encoder.norm_out.weight"]
a_ = vae_state_dict["encoder.norm_out.bias"]
a_ = vae_state_dict["decoder.conv_in.weight"]
a_ = vae_state_dict["decoder.conv_in.bias"]
a_ = vae_state_dict["decoder.conv_out.weight"]
a_ = vae_state_dict["decoder.conv_out.bias"]
a_ = vae_state_dict["decoder.norm_out.weight"]
a_ = vae_state_dict["decoder.norm_out.bias"]
a_ = vae_state_dict["quant_conv.weight"]
a_ = vae_state_dict["quant_conv.bias"]
a_ = vae_state_dict["post_quant_conv.weight"]
a_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
a_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
a_ = {
layer_id: [key for key in vae_state_dict if F"""down.{layer_id}""" in key] for layer_id in range(lowercase__ )
}
# Retrieves the keys for the decoder up blocks only
a_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
a_ = {
layer_id: [key for key in vae_state_dict if F"""up.{layer_id}""" in key] for layer_id in range(lowercase__ )
}
for i in range(lowercase__ ):
a_ = [key for key in down_blocks[i] if F"""down.{i}""" in key and F"""down.{i}.downsample""" not in key]
if F"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
a_ = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.weight""" )
a_ = vae_state_dict.pop(
F"""encoder.down.{i}.downsample.conv.bias""" )
a_ = renew_vae_resnet_paths(lowercase__ )
a_ = {"old": F"""down.{i}.block""", "new": F"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(lowercase__ ,lowercase__ ,lowercase__ ,additional_replacements=[meta_path] ,config=lowercase__ )
a_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
a_ = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
a_ = [key for key in mid_resnets if F"""encoder.mid.block_{i}""" in key]
a_ = renew_vae_resnet_paths(lowercase__ )
a_ = {"old": F"""mid.block_{i}""", "new": F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(lowercase__ ,lowercase__ ,lowercase__ ,additional_replacements=[meta_path] ,config=lowercase__ )
a_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
a_ = renew_vae_attention_paths(lowercase__ )
a_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(lowercase__ ,lowercase__ ,lowercase__ ,additional_replacements=[meta_path] ,config=lowercase__ )
conv_attn_to_linear(lowercase__ )
for i in range(lowercase__ ):
a_ = num_up_blocks - 1 - i
a_ = [
key for key in up_blocks[block_id] if F"""up.{block_id}""" in key and F"""up.{block_id}.upsample""" not in key
]
if F"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
a_ = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.weight"""
]
a_ = vae_state_dict[
F"""decoder.up.{block_id}.upsample.conv.bias"""
]
a_ = renew_vae_resnet_paths(lowercase__ )
a_ = {"old": F"""up.{block_id}.block""", "new": F"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(lowercase__ ,lowercase__ ,lowercase__ ,additional_replacements=[meta_path] ,config=lowercase__ )
a_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
a_ = 2
for i in range(1 ,num_mid_res_blocks + 1 ):
a_ = [key for key in mid_resnets if F"""decoder.mid.block_{i}""" in key]
a_ = renew_vae_resnet_paths(lowercase__ )
a_ = {"old": F"""mid.block_{i}""", "new": F"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(lowercase__ ,lowercase__ ,lowercase__ ,additional_replacements=[meta_path] ,config=lowercase__ )
a_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
a_ = renew_vae_attention_paths(lowercase__ )
a_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(lowercase__ ,lowercase__ ,lowercase__ ,additional_replacements=[meta_path] ,config=lowercase__ )
conv_attn_to_linear(lowercase__ )
return new_checkpoint
def __UpperCAmelCase (lowercase__ ,lowercase__ ,) -> Optional[int]:
'''simple docstring'''
a_ = requests.get(
" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
a_ = io.BytesIO(r.content )
a_ = OmegaConf.load(lowercase__ )
a_ = 512
a_ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
a_ = {}
with safe_open(lowercase__ ,framework="pt" ,device="cpu" ) as f:
for key in f.keys():
a_ = f.get_tensor(lowercase__ )
else:
a_ = torch.load(lowercase__ ,map_location=lowercase__ )["state_dict"]
# Convert the VAE model.
a_ = create_vae_diffusers_config(lowercase__ ,image_size=lowercase__ )
a_ = custom_convert_ldm_vae_checkpoint(lowercase__ ,lowercase__ )
a_ = AutoencoderKL(**lowercase__ )
vae.load_state_dict(lowercase__ )
vae.save_pretrained(lowercase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to store the converted VAE model.')
a_ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
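    # Hypothetical invocation (file names are placeholders, not from the original):
    # python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers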
| 685 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(lowercase__ ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCAmelCase (lowercase__ = 10001 ) -> int:
'''simple docstring'''
try:
a_ = int(lowercase__ )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
a_ = []
a_ = 2
while len(lowercase__ ) < nth:
if is_prime(lowercase__ ):
primes.append(lowercase__ )
num += 1
else:
num += 1
return primes[len(lowercase__ ) - 1]
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['Pix2StructImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''visual_bert'''
def __init__( self: Union[str, Any] , a: List[Any]=3_05_22 , a: List[Any]=7_68 , a: Union[str, Any]=5_12 , a: List[str]=12 , a: Tuple=12 , a: Optional[Any]=30_72 , a: int="gelu" , a: Union[str, Any]=0.1 , a: int=0.1 , a: str=5_12 , a: Optional[int]=2 , a: List[str]=0.02 , a: Optional[int]=1e-12 , a: str=False , a: Any=True , a: Tuple=1 , a: Dict=0 , a: Any=2 , **a: Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
a_ = vocab_size
a_ = max_position_embeddings
a_ = hidden_size
a_ = visual_embedding_dim
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = type_vocab_size
a_ = layer_norm_eps
a_ = bypass_transformer
a_ = special_visual_initialize
| 685 | 1 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) ,np.inf )
a_ = 0
a_ = np.empty((rows, cols) ,dtype=lowercase__ )
a_ = None
while queue:
((a_) , (a_)) = heappop(lowercase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(lowercase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(lowercase__ ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(lowercase__ ,(dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
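# For example, with grid = np.array([[1, 1], [0, 1]]) (1 = traversable cell), a
# search from (0, 0) to (1, 1) without diagonal moves returns distance 2.0 and the
# path [(0, 0), (0, 1), (1, 1)].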
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) ,np.inf )
a_ = 0
a_ = np.empty((rows, cols) ,dtype=lowercase__ )
a_ = None
while queue:
((a_) , (a_)) = heappop(lowercase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(lowercase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(lowercase__ ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(lowercase__ ,(dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Optional[int] , a: Optional[int] , a: Any=3 , a: str=32 , a: Union[str, Any]=3 , a: str=10 , a: str=[10, 20, 30, 40] , a: List[Any]=[1, 1, 2, 1] , a: int=True , a: Union[str, Any]=True , a: Dict="relu" , a: Any=3 , a: int=None , ) ->str:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = image_size
a_ = num_channels
a_ = embeddings_size
a_ = hidden_sizes
a_ = depths
a_ = is_training
a_ = use_labels
a_ = hidden_act
a_ = num_labels
a_ = scope
a_ = len(a)
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _lowerCAmelCase ( self: Union[str, Any] , a: List[Any] , a: Dict , a: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = TFRegNetModel(config=a)
a_ = model(a , training=a)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCAmelCase ( self: Dict , a: List[Any] , a: Tuple , a: Optional[int]) ->Dict:
'''simple docstring'''
a_ = self.num_labels
a_ = TFRegNetForImageClassification(a)
a_ = model(a , labels=a , training=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCAmelCase ( self: Dict) ->List[str]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
_UpperCAmelCase =(
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase =False
_UpperCAmelCase =False
_UpperCAmelCase =False
_UpperCAmelCase =False
_UpperCAmelCase =False
def _lowerCAmelCase ( self: List[Any]) ->List[Any]:
'''simple docstring'''
a_ = TFRegNetModelTester(self)
a_ = ConfigTester(self , config_class=a , has_text_modality=a)
def _lowerCAmelCase ( self: int) ->str:
'''simple docstring'''
return
@unittest.skip(reason="RegNet does not use inputs_embeds")
def _lowerCAmelCase ( self: int) ->str:
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def _lowerCAmelCase ( self: List[str]) ->List[str]:
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings")
def _lowerCAmelCase ( self: Union[str, Any]) ->Any:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Optional[Any]) ->Any:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(a)
a_ = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a)
def _lowerCAmelCase ( self: Optional[int]) ->Any:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a)
def _lowerCAmelCase ( self: List[str]) ->Tuple:
'''simple docstring'''
def check_hidden_states_output(a: Dict , a: int , a: Any):
a_ = model_class(a)
a_ = model(**self._prepare_for_class(a , a) , training=a)
a_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a_ = self.model_tester.num_stages
self.assertEqual(len(a) , expected_num_stages + 1)
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
a_ = layer_type
a_ = True
check_hidden_states_output(a , a , a)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(a , a , a)
def _lowerCAmelCase ( self: Any) ->str:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(a: Dict , a: Dict , a: Dict , a: int={}):
a_ = model(a , return_dict=a , **a)
a_ = model(a , return_dict=a , **a).to_tuple()
def recursive_check(a: List[Any] , a: Dict):
if isinstance(a , (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(a , a):
recursive_check(a , a)
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(a , a)) , msg=(
"Tuple and dict output are not equal. Difference:"
f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"""
) , )
recursive_check(a , a)
for model_class in self.all_model_classes:
a_ = model_class(a)
a_ = self._prepare_for_class(a , a)
a_ = self._prepare_for_class(a , a)
check_equivalence(a , a , a)
a_ = self._prepare_for_class(a , a , return_labels=a)
a_ = self._prepare_for_class(a , a , return_labels=a)
check_equivalence(a , a , a)
a_ = self._prepare_for_class(a , a)
a_ = self._prepare_for_class(a , a)
check_equivalence(a , a , a , {"output_hidden_states": True})
a_ = self._prepare_for_class(a , a , return_labels=a)
a_ = self._prepare_for_class(a , a , return_labels=a)
check_equivalence(a , a , a , {"output_hidden_states": True})
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a)
@slow
def _lowerCAmelCase ( self: Any) ->Tuple:
'''simple docstring'''
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = TFRegNetModel.from_pretrained(a)
self.assertIsNotNone(a)
def __UpperCAmelCase () -> Optional[Any]:
'''simple docstring'''
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def _lowerCAmelCase ( self: List[str]) ->Tuple:
'''simple docstring'''
a_ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=a , return_tensors="tf")
# forward pass
a_ = model(**a , training=a)
# verify the logits
a_ = tf.TensorShape((1, 10_00))
self.assertEqual(outputs.logits.shape , a)
a_ = tf.constant([-0.4180, -1.5051, -3.4836])
tf.debugging.assert_near(outputs.logits[0, :3] , a , atol=1e-4)
| 685 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
with open(lowercase__ ) as metadata_file:
a_ = json.load(lowercase__ )
a_ = LukeConfig(use_entity_aware_attention=lowercase__ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
a_ = torch.load(lowercase__ ,map_location="cpu" )["module"]
# Load the entity vocab file
a_ = load_original_entity_vocab(lowercase__ )
# add an entry for [MASK2]
a_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a_ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
a_ = AddedToken("<ent>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
a_ = AddedToken("<ent2>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"r" ) as f:
a_ = json.load(lowercase__ )
a_ = "MLukeTokenizer"
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
with open(os.path.join(lowercase__ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
a_ = tokenizer.convert_tokens_to_ids(["@"] )[0]
a_ = tokenizer.convert_tokens_to_ids(["#"] )[0]
a_ = state_dict["embeddings.word_embeddings.weight"]
a_ = word_emb[ent_init_index].unsqueeze(0 )
a_ = word_emb[enta_init_index].unsqueeze(0 )
a_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a_ = state_dict[bias_name]
a_ = decoder_bias[ent_init_index].unsqueeze(0 )
a_ = decoder_bias[enta_init_index].unsqueeze(0 )
a_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a_ = F"""encoder.layer.{layer_index}.attention.self."""
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ = state_dict["entity_embeddings.entity_embeddings.weight"]
a_ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a_ = state_dict["entity_predictions.bias"]
a_ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
a_ = LukeForMaskedLM(config=lowercase__ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
a_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
a_ = state_dict[key]
else:
a_ = state_dict[key]
a_ , a_ = model.load_state_dict(lowercase__ ,strict=lowercase__ )
if set(lowercase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a_ = MLukeTokenizer.from_pretrained(lowercase__ ,task="entity_classification" )
a_ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
a_ = (0, 9)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 33, 768) )
a_ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 1, 768) )
a_ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
a_ = "Tokyo is the capital of <mask>."
a_ = (24, 30)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
a_ = encoding["input_ids"][0].tolist()
a_ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
a_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase__ )
a_ = outputs.entity_logits[0][0].argmax().item()
a_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def __UpperCAmelCase (lowercase__ ) -> Any:
'''simple docstring'''
a_ = ["[MASK]", "[PAD]", "[UNK]"]
a_ = [json.loads(lowercase__ ) for line in open(lowercase__ )]
a_ = {}
for entry in data:
a_ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
a_ = entity_id
break
a_ = F"""{language}:{entity_name}"""
a_ = entity_id
return new_mapping
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
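
# Example invocation (all paths and the script name are hypothetical
# placeholders; the flags match the argparse definitions above):
#
#     python convert_mluke_checkpoint.py \
#         --checkpoint_path /path/to/pytorch_model.bin \
#         --metadata_path /path/to/metadata.json \
#         --entity_vocab_path /path/to/entity_vocab.jsonl \
#         --pytorch_dump_folder_path /path/to/output \
#         --model_size base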
| 685 | 1 |
'''simple docstring'''
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    '''simple docstring'''
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    '''simple docstring'''
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    '''simple docstring'''
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result


def add_file_length(source_path: str, compressed: str) -> str:
    '''simple docstring'''
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    '''simple docstring'''
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    '''simple docstring'''
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
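
# A minimal in-memory sketch of the encoding stage alone (the bit string is
# made up; real inputs come from read_file_binary above):
#
#     bits = read_file_binary("sample.bin")  # "sample.bin" is a placeholder
#     encoded = compress_data(bits)
#     print(encoded)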
| 685 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =LxmertTokenizer
_UpperCAmelCase =LxmertTokenizerFast
_UpperCAmelCase =True
_UpperCAmelCase =True
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
super().setUp()
a_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _lowerCAmelCase ( self: Optional[Any] , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = "UNwant\u00E9d,running"
a_ = "unwanted, running"
return input_text, output_text
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = self.tokenizer_class(self.vocab_file)
a_ = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(a , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , [7, 4, 5, 10, 8, 9])
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer()
a_ = "I was born in 92000, and this is falsé."
a_ = tokenizer.tokenize(a)
a_ = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
a_ = tokenizer.encode(a , add_special_tokens=a)
a_ = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
a_ = self.get_rust_tokenizer()
a_ = tokenizer.encode(a)
a_ = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
| 685 | 1 |
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    '''simple docstring'''

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    '''simple docstring'''
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    '''simple docstring'''

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    '''simple docstring'''

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
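
    # Usage sketch (iteration counts are arbitrary; results vary per run):
    pi_estimator(10_000)
    area_under_line_estimator_check(10_000)
    pi_estimator_using_area_under_curve(10_000)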
| 685 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
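
# With the lazy structure above, names resolve on first attribute access.
# Sketch (the checkpoint id is an unverified placeholder):
#
#     from transformers.models.cpmant import CpmAntTokenizer
#     tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")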
| 685 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Optional[Any] , a: Any , a: Dict , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , ) ->int:
'''simple docstring'''
a_ = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(a , start_positions=a , end_positions=a)
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Union[str, Any] , a: Any , a: Tuple , a: Union[str, Any] , a: int , a: int , a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str , a: List[str] , a: Dict , a: Tuple , a: Optional[Any] , a: Any , a: Any , a: str , a: str , a: Optional[Any] , ) ->List[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: List[Any] , a: Dict , a: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[str] , a: Tuple , ) ->Dict:
'''simple docstring'''
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Any , a: List[str] , a: Union[str, Any] , a: int) ->int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: List[Any] , a: Tuple=False) ->List[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModelTester(self)
a_ = ConfigTester(self , config_class=a , emb_dim=37)
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a)
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a)
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = FlaubertModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt"))
a_ = torch.jit.load(os.path.join(a , "traced_model.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
a_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
a_ = model(a)[0]
a_ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4))
| 685 |
'''simple docstring'''
import re
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
a_ = re.compile(
r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
return bool(re.search(lowercase__ ,lowercase__ ) )
if __name__ == "__main__":
a_ = '0094702343221'
print(is_sri_lankan_phone_number(phone))
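    # A couple of illustrative (made-up) numbers against the same pattern:
    for candidate in ("+94771234567", "0361234567"):
        print(candidate, is_sri_lankan_phone_number(candidate))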
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    '''simple docstring'''
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    '''simple docstring'''
    for number_to_partition in range(1, number_unique_partitions):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F'{solution() = }')
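    # Sketch: the prime partitions of 7 are 7, 2 + 5 and 2 + 2 + 3, so the
    # products collected by partition() are:
    print(sorted(partition(7)))  # [7, 10, 12]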
| 685 |
'''simple docstring'''
import argparse
import os
import re
a_ = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
a_ = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
a_ = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def __UpperCAmelCase (lowercase__ ,lowercase__ = False ) -> List[Any]:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.read()
a_ = content.split("\n" )
a_ = []
a_ = 0
while line_idx < len(lowercase__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
a_ = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
a_ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
a_ = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
a_ = sorted(lowercase__ ,key=lambda lowercase__ : _re_identifier.search(lowercase__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase__ ,"w" ,encoding="utf-8" ) as f:
f.write("\n".join(lowercase__ ) )
elif "\n".join(lowercase__ ) != content:
return True
def __UpperCAmelCase (lowercase__ = False ) -> Optional[int]:
'''simple docstring'''
a_ = [os.path.join(lowercase__ ,lowercase__ ) for f in os.listdir(lowercase__ ) if f.endswith(".py" )]
a_ = [sort_auto_mapping(lowercase__ ,overwrite=lowercase__ ) for fname in fnames]
if not overwrite and any(lowercase__ ):
a_ = [f for f, d in zip(lowercase__ ,lowercase__ ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {', '.join(lowercase__ )}. Run `make style` to fix"""
" this." )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
a_ = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 685 | 1 |
'''simple docstring'''
import math


def prime_sieve(n: int) -> list:
    '''simple docstring'''
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 999966663333) -> int:
    '''simple docstring'''
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
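    # The default bound above takes a while; a smaller limit exercises the
    # same code path quickly (value not asserted here):
    print(solution(10_000))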
| 685 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
_UpperCAmelCase ='''pixel_values'''
_UpperCAmelCase =False
_UpperCAmelCase =TimmBackboneConfig
def __init__( self: Union[str, Any] , a: Union[str, Any] , **a: Tuple) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , "timm")
super().__init__(a)
a_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
if hasattr(a , "out_features") and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
a_ = getattr(a , "use_pretrained_backbone" , a)
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
# We just take the final layer by default. This matches the default for the transformers models.
a_ = config.out_indices if getattr(a , "out_indices" , a) is not None else (-1,)
a_ = timm.create_model(
config.backbone , pretrained=a , features_only=config.features_only , in_chans=config.num_channels , out_indices=a , **a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
a_ = self._backbone.return_layers
a_ = {layer["module"]: str(a) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(a)
@classmethod
def _lowerCAmelCase ( cls: Tuple , a: Optional[Any] , *a: Optional[Any] , **a: str) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"])
from ...models.timm_backbone import TimmBackboneConfig
a_ = kwargs.pop("config" , TimmBackboneConfig())
a_ = kwargs.pop("use_timm_backbone" , a)
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones")
a_ = kwargs.pop("num_channels" , config.num_channels)
a_ = kwargs.pop("features_only" , config.features_only)
a_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone)
a_ = kwargs.pop("out_indices" , config.out_indices)
a_ = TimmBackboneConfig(
backbone=a , num_channels=a , features_only=a , use_pretrained_backbone=a , out_indices=a , )
return super()._from_config(a , **a)
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Tuple , a: List[Any] , a: Any=None , a: Dict=None , a: Optional[int]=None , **a: int) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment")
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
a_ = self._all_layers
a_ = self._backbone(a , **a)
a_ = self._return_layers
a_ = tuple(hidden_states[i] for i in self.out_indices)
else:
a_ = self._backbone(a , **a)
a_ = None
a_ = tuple(a)
a_ = tuple(a) if hidden_states is not None else None
if not return_dict:
a_ = (feature_maps,)
if output_hidden_states:
a_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=a , hidden_states=a , attentions=a)
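
# A rough usage sketch (assumes `timm` is installed; "resnet18" is one example
# from timm.list_models(), and TimmBackbone is this class's upstream name):
#
#     config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
#     backbone = TimmBackbone(config)
#     feature_maps = backbone(pixel_values).feature_maps  # one map per out_index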
| 685 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =LxmertTokenizer
_UpperCAmelCase =LxmertTokenizerFast
_UpperCAmelCase =True
_UpperCAmelCase =True
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
super().setUp()
a_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _lowerCAmelCase ( self: Optional[Any] , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = "UNwant\u00E9d,running"
a_ = "unwanted, running"
return input_text, output_text
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = self.tokenizer_class(self.vocab_file)
a_ = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(a , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , [7, 4, 5, 10, 8, 9])
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer()
a_ = "I was born in 92000, and this is falsé."
a_ = tokenizer.tokenize(a)
a_ = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
a_ = tokenizer.encode(a , add_special_tokens=a)
a_ = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
a_ = self.get_rust_tokenizer()
a_ = tokenizer.encode(a)
a_ = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
| 685 |
'''simple docstring'''


class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self) -> None:
        '''simple docstring'''
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        '''simple docstring'''
        try:
            if len(self.queues[priority]) >= 1_00:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        '''simple docstring'''
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        '''simple docstring'''
        return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self) -> None:
        '''simple docstring'''
        self.queue = []

    def enqueue(self, data: int) -> None:
        '''simple docstring'''
        if len(self.queue) == 1_00:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        '''simple docstring'''
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        '''simple docstring'''
        return str(self.queue)


def fixed_priority_queue() -> None:
    '''simple docstring'''
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    '''simple docstring'''
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    # Each demo dequeues one item more than it enqueued, so the final dequeue
    # raises; catch it so both demos run to completion.
    try:
        fixed_priority_queue()
    except UnderFlowError as error:
        print(error)
    try:
        element_priority_queue()
    except UnderFlowError as error:
        print(error)
| 685 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
a_ = logging.get_logger(__name__)
a_ = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''bloom'''
_UpperCAmelCase =['''past_key_values''']
_UpperCAmelCase ={
'''num_hidden_layers''': '''n_layer''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self: int , a: Tuple=25_08_80 , a: Optional[int]=64 , a: Tuple=2 , a: str=8 , a: int=1e-5 , a: Optional[Any]=0.02 , a: Tuple=True , a: Dict=1 , a: Any=2 , a: Optional[int]=False , a: str=0.0 , a: Optional[Any]=0.0 , a: List[str]=1 , a: Optional[int]=False , **a: Dict , ) ->Dict:
'''simple docstring'''
a_ = vocab_size
# Backward compatibility with n_embed kwarg
a_ = kwargs.pop("n_embed" , a)
a_ = hidden_size if n_embed is None else n_embed
a_ = n_layer
a_ = n_head
a_ = layer_norm_epsilon
a_ = initializer_range
a_ = use_cache
a_ = pretraining_tp
a_ = apply_residual_connection_post_layernorm
a_ = hidden_dropout
a_ = attention_dropout
a_ = bos_token_id
a_ = eos_token_id
a_ = slow_but_exact
super().__init__(bos_token_id=a , eos_token_id=a , **a)
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =version.parse('''1.12''' )
def __init__( self: Union[str, Any] , a: PretrainedConfig , a: str = "default" , a: List[PatchingSpec] = None , a: bool = False , ) ->List[str]:
'''simple docstring'''
super().__init__(a , task=a , patching_specs=a , use_past=a)
if not getattr(self._config , "pad_token_id" , a):
# TODO: how to do that better?
a_ = 0
@property
def _lowerCAmelCase ( self: List[Any]) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
a_ = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(a , direction="inputs" , inverted_values_shape=a)
a_ = {0: "batch", 1: "past_sequence + sequence"}
else:
a_ = {0: "batch", 1: "sequence"}
return common_inputs
@property
def _lowerCAmelCase ( self: Optional[Any]) ->int:
'''simple docstring'''
return self._config.n_layer
@property
def _lowerCAmelCase ( self: List[Any]) ->int:
'''simple docstring'''
return self._config.n_head
@property
def _lowerCAmelCase ( self: Dict) ->float:
'''simple docstring'''
return 1e-3
def _lowerCAmelCase ( self: Optional[int] , a: "PreTrainedTokenizer" , a: int = -1 , a: int = -1 , a: bool = False , a: Optional["TensorType"] = None , ) ->Mapping[str, Any]:
'''simple docstring'''
a_ = super(a , self).generate_dummy_inputs(
a , batch_size=a , seq_length=a , is_pair=a , framework=a)
# We need to order the input in the way they appears in the forward()
a_ = OrderedDict({"input_ids": common_inputs["input_ids"]})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
a_ , a_ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
a_ = seqlen + 2
a_ = self._config.hidden_size // self.num_attention_heads
a_ = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
a_ = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
a_ = [
(torch.zeros(a), torch.zeros(a)) for _ in range(self.num_layers)
]
a_ = common_inputs["attention_mask"]
if self.use_past:
a_ = ordered_inputs["attention_mask"].dtype
a_ = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(a , a , dtype=a)] , dim=1)
return ordered_inputs
@property
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
return 13
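
# Standalone sketch of the past_key_values geometry built above (numbers are
# illustrative): keys put head_dim before the sequence axis, values invert it.
if __name__ == "__main__":
    batch, num_heads, hidden_size, seqlen = 2, 8, 64, 5
    head_dim = hidden_size // num_heads
    past_length = seqlen + 2
    key_shape = (batch * num_heads, head_dim, past_length)
    value_shape = (batch * num_heads, past_length, head_dim)
    print(key_shape, value_shape)  # (16, 8, 7) (16, 7, 8)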
| 685 |
'''simple docstring'''
from unittest import TestCase

from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset() -> Dataset:
    '''simple docstring'''
    data = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self) -> None:
        '''simple docstring'''
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self) -> None:
        '''simple docstring'''
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 685 | 1 |
'''simple docstring'''
from manim import *
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def _lowerCAmelCase ( self: List[Any]) ->List[str]:
'''simple docstring'''
a_ = Rectangle(height=0.5 , width=0.5)
a_ = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
a_ = [mem.copy() for i in range(6)]
a_ = [mem.copy() for i in range(6)]
a_ = VGroup(*a).arrange(a , buff=0)
a_ = VGroup(*a).arrange(a , buff=0)
a_ = VGroup(a , a).arrange(a , buff=0)
a_ = Text("CPU" , font_size=24)
a_ = Group(a , a).arrange(a , buff=0.5 , aligned_edge=a)
cpu.move_to([-2.5, -0.5, 0])
self.add(a)
a_ = [mem.copy() for i in range(1)]
a_ = VGroup(*a).arrange(a , buff=0)
a_ = Text("GPU" , font_size=24)
a_ = Group(a , a).arrange(a , buff=0.5 , aligned_edge=a)
gpu.align_to(a , a)
gpu.set_x(gpu.get_x() - 1)
self.add(a)
a_ = [mem.copy() for i in range(6)]
a_ = VGroup(*a).arrange(a , buff=0)
a_ = Text("Model" , font_size=24)
a_ = Group(a , a).arrange(a , buff=0.5 , aligned_edge=a)
model.move_to([3, -1.0, 0])
self.play(
Create(a , run_time=1) , Create(a , run_time=1) , Create(a , run_time=1) , )
a_ = MarkupText(
f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
a_ = Square(side_length=2.2)
key.move_to([-5, 2, 0])
a_ = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
step_a.move_to([2, 2, 0])
self.play(Write(a , run_time=2.5) , Write(a) , Write(a))
self.add(a)
a_ = []
a_ = []
a_ = []
for i, rect in enumerate(a):
a_ = Rectangle(height=0.46 , width=0.46).set_stroke(width=0.0).set_fill(a , opacity=0.7)
cpu_target.move_to(a)
cpu_target.generate_target()
a_ = 0.46 / 4
a_ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=a)
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=a , buff=0.0)
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=a , buff=0.0)
cpu_targs.append(a)
first_animations.append(rect.animate(run_time=0.5).set_stroke(a))
second_animations.append(MoveToTarget(a , run_time=1.5))
self.play(*a)
self.play(*a)
self.wait()
| 685 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
def __init__( self: List[Any] , *a: str , **a: Tuple) ->None:
'''simple docstring'''
warnings.warn(
"The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DonutImageProcessor instead." , a , )
super().__init__(*a , **a)
| 685 | 1 |
'''simple docstring'''
import string
from math import log10
def term_frequency(term ,document ) -> int:
    '''simple docstring'''
    document_without_punctuation = document.translate(
        str.maketrans("" ,"" ,string.punctuation ) ).replace("\n" ,"" )
    tokenize_document = document_without_punctuation.split(" " ) # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency(term ,corpus ) -> tuple[int, int]:
    '''simple docstring'''
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("" ,"" ,string.punctuation ) ) # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n" )
    term = term.lower()
    return (len([doc for doc in docs if term in doc] ), len(docs ))
def inverse_document_frequency(df ,n ,smoothing=False ) -> float:
    '''simple docstring'''
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined." )
        return round(1 + log10(n / (1 + df) ) ,3 )
    if df == 0:
        raise ZeroDivisionError("df must be > 0" )
    elif n == 0:
        raise ValueError("log10(0) is undefined." )
    return round(log10(n / df ) ,3 )
def tf_idf(tf ,idf ) -> float:
    '''simple docstring'''
    return round(tf * idf ,3 )
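A worked example of the four helpers above on a two-document corpus (the corpus text is illustrative; the expected values follow directly from the formulas):

corpus = "the cat sat\nthe dog barked"

tf = term_frequency("cat", "the cat sat")   # 1 occurrence
df, n = document_frequency("cat", corpus)   # in 1 of 2 documents
idf = inverse_document_frequency(df, n)     # round(log10(2 / 1), 3) == 0.301
assert tf_idf(tf, idf) == 0.301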
| 685 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a_ = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Any , a: Path , a: Union[str, None] = None , a: Union[List[str], None] = None , a: Union[str, List[str], None] = None , a: bool = True , ) ->Optional[Any]:
'''simple docstring'''
a_ = [file for file in os.listdir(a) if os.path.isfile(os.path.join(a , a))]
if identifier is not None:
a_ = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(a , a):
for n_ in n_identifier:
a_ = [file for file in files if n_ not in file]
else:
a_ = [file for file in files if n_identifier not in file]
a_ = ignore_files or []
ignore_files.append("__init__.py")
a_ = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , a)
if only_modules:
a_ = file.split(".")[0]
try:
a_ = getattr(a , a)
a_ = doctest.DocTestSuite(a)
a_ = unittest.TextTestRunner().run(a)
self.assertIs(len(result.failures) , 0)
except AttributeError:
logger.info(f"""{module_identifier} is not a module.""")
else:
a_ = doctest.testfile(str(".." / directory / file) , optionflags=doctest.ELLIPSIS)
self.assertIs(result.failed , 0)
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "modeling"
a_ = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(a , identifier=a , ignore_files=a)
def _lowerCAmelCase ( self: int) ->Dict:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "tokenization"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = "configuration"
self.analyze_directory(a , identifier=a)
def _lowerCAmelCase ( self: Union[str, Any]) ->Any:
'''simple docstring'''
a_ = Path("src/transformers")
a_ = ["configuration", "modeling", "tokenization"]
self.analyze_directory(a , n_identifier=a)
def _lowerCAmelCase ( self: Optional[int]) ->Tuple:
'''simple docstring'''
a_ = Path("docs/source")
a_ = ["favicon.ico"]
self.analyze_directory(a , ignore_files=a , only_modules=a)
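For reference, the doctest entry points this harness relies on collect examples embedded in docstrings. A minimal, self-contained illustration, independent of the transformers sources it scans:

import doctest

def square(x):
    """Return x squared.

    >>> square(3)
    9
    """
    return x * x

# doctest.DocTestSuite collects the examples in a module's docstrings;
# doctest.testmod is the one-module shortcut used here.
results = doctest.testmod(verbose=False)
assert results.failed == 0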
| 685 | 1 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''Wav2Vec2FeatureExtractor'''
_UpperCAmelCase ='''AutoTokenizer'''
def __init__( self: Dict , a: str , a: Dict) ->Optional[Any]:
'''simple docstring'''
super().__init__(a , a)
a_ = self.feature_extractor
a_ = False
@classmethod
def _lowerCAmelCase ( cls: Union[str, Any] , a: Union[str, Any] , **a: Dict) ->Union[str, Any]:
'''simple docstring'''
try:
return super().from_pretrained(a , **a)
except OSError:
warnings.warn(
f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , a , )
a_ = WavaVecaFeatureExtractor.from_pretrained(a , **a)
a_ = WavaVecaCTCTokenizer.from_pretrained(a , **a)
return cls(feature_extractor=a , tokenizer=a)
def __call__( self: Optional[int] , *a: Optional[int] , **a: Optional[int]) ->Optional[Any]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*a , **a)
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
a_ = kwargs.pop("raw_speech")
else:
a_ = kwargs.pop("audio" , a)
a_ = kwargs.pop("sampling_rate" , a)
a_ = kwargs.pop("text" , a)
if len(a) > 0:
a_ = args[0]
a_ = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if audio is not None:
a_ = self.feature_extractor(a , *a , sampling_rate=a , **a)
if text is not None:
a_ = self.tokenizer(a , **a)
if text is None:
return inputs
elif audio is None:
return encodings
else:
a_ = encodings["input_ids"]
return inputs
def _lowerCAmelCase ( self: List[str] , *a: Optional[Any] , **a: Union[str, Any]) ->str:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor.pad(*a , **a)
a_ = kwargs.pop("input_features" , a)
a_ = kwargs.pop("labels" , a)
if len(a) > 0:
a_ = args[0]
a_ = args[1:]
if input_features is not None:
a_ = self.feature_extractor.pad(a , *a , **a)
if labels is not None:
a_ = self.tokenizer.pad(a , **a)
if labels is None:
return input_features
elif input_features is None:
return labels
else:
a_ = labels["input_ids"]
return input_features
def _lowerCAmelCase ( self: Union[str, Any] , *a: Dict , **a: str) ->Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a)
def _lowerCAmelCase ( self: List[str] , *a: List[Any] , **a: Optional[int]) ->Any:
'''simple docstring'''
return self.tokenizer.decode(*a , **a)
@contextmanager
def _lowerCAmelCase ( self: str) ->Union[str, Any]:
'''simple docstring'''
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call.")
a_ = True
a_ = self.tokenizer
yield
a_ = self.feature_extractor
a_ = False
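In the transformers library this processor is Wav2Vec2Processor; a usage sketch with that public name, an illustrative checkpoint, and dummy audio:

import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz

# Audio features and label ids come back from a single call via `text`,
# which is exactly what the deprecated `as_target_processor` context replaced.
inputs = processor(audio=audio, sampling_rate=16_000, text="HELLO", return_tensors="pt")
print(inputs["input_values"].shape, inputs["labels"].shape)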
| 685 |
'''simple docstring'''
def solution(n = 100 ) -> int:
    '''simple docstring'''
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
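Two quick sanity checks: for n = 10 the square of the sum is 3025 and the sum of squares is 385, and the default n = 100 gives the Project Euler answer.

assert solution(10) == 2_640        # 3025 - 385
assert solution() == 25_164_150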
| 685 | 1 |
'''simple docstring'''
def naive_pattern_search(s ,pattern ) -> list:
    '''simple docstring'''
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
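The search above is the classic O(len(s) * len(pattern)) brute force. A cross-check built on str.find, which reports the same (possibly overlapping) match positions, might look like:

def find_all(s, pattern):
    positions, i = [], s.find(pattern)
    while i != -1:
        positions.append(i)
        i = s.find(pattern, i + 1)  # resume one past the last hit
    return positions

assert find_all("ABAAABCDBBABCDDEBCABC", "ABC") == naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC")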
| 685 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =(PNDMScheduler,)
_UpperCAmelCase =(('''num_inference_steps''', 50),)
def _lowerCAmelCase ( self: int , **a: Optional[int]) ->Any:
'''simple docstring'''
a_ = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a)
return config
def _lowerCAmelCase ( self: Any , a: Tuple=0 , **a: Any) ->Any:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
new_scheduler.set_timesteps(a)
# copy over dummy past residuals
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: str) ->Any:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Union[str, Any] , a: str=0 , **a: Union[str, Any]) ->Tuple:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# copy over dummy past residuals (must be after setting timesteps)
a_ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a)
a_ = scheduler_class.from_pretrained(a)
# copy over dummy past residuals
new_scheduler.set_timesteps(a)
# copy over dummy past residual (must be after setting timesteps)
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , a , a , **a).prev_sample
a_ = new_scheduler.step_prk(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
a_ = scheduler.step_plms(a , a , a , **a).prev_sample
a_ = new_scheduler.step_plms(a , a , a , **a).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def _lowerCAmelCase ( self: Dict , **a: int) ->Any:
'''simple docstring'''
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(**a)
a_ = scheduler_class(**a)
a_ = 10
a_ = self.dummy_model()
a_ = self.dummy_sample_deter
scheduler.set_timesteps(a)
for i, t in enumerate(scheduler.prk_timesteps):
a_ = model(a , a)
a_ = scheduler.step_prk(a , a , a).prev_sample
for i, t in enumerate(scheduler.plms_timesteps):
a_ = model(a , a)
a_ = scheduler.step_plms(a , a , a).prev_sample
return sample
def _lowerCAmelCase ( self: int) ->int:
'''simple docstring'''
a_ = dict(self.forward_default_kwargs)
a_ = kwargs.pop("num_inference_steps" , a)
for scheduler_class in self.scheduler_classes:
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
a_ = self.dummy_sample
a_ = 0.1 * sample
if num_inference_steps is not None and hasattr(a , "set_timesteps"):
scheduler.set_timesteps(a)
elif num_inference_steps is not None and not hasattr(a , "set_timesteps"):
a_ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
a_ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
a_ = dummy_past_residuals[:]
a_ = scheduler.step_prk(a , 0 , a , **a).prev_sample
a_ = scheduler.step_prk(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
a_ = scheduler.step_plms(a , 0 , a , **a).prev_sample
a_ = scheduler.step_plms(a , 1 , a , **a).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _lowerCAmelCase ( self: Dict) ->List[Any]:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=a)
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config(steps_offset=1)
a_ = scheduler_class(**a)
scheduler.set_timesteps(10)
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1]) , )
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02]):
self.check_over_configs(beta_start=a , beta_end=a)
def _lowerCAmelCase ( self: int) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a)
def _lowerCAmelCase ( self: Optional[int]) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def _lowerCAmelCase ( self: Tuple) ->Optional[Any]:
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=a)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00]):
self.check_over_forward(num_inference_steps=a)
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = 27
for scheduler_class in self.scheduler_classes:
a_ = self.dummy_sample
a_ = 0.1 * sample
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.set_timesteps(a)
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2]):
a_ = scheduler.step_prk(a , a , a).prev_sample
def _lowerCAmelCase ( self: Optional[Any]) ->Dict:
'''simple docstring'''
with self.assertRaises(a):
a_ = self.scheduler_classes[0]
a_ = self.get_scheduler_config()
a_ = scheduler_class(**a)
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample).prev_sample
def _lowerCAmelCase ( self: Optional[int]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.full_loop()
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 198.1318) < 1e-2
assert abs(result_mean.item() - 0.2580) < 1e-3
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = self.full_loop(prediction_type="v_prediction")
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 67.3986) < 1e-2
assert abs(result_mean.item() - 0.0878) < 1e-3
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 230.0399) < 1e-2
assert abs(result_mean.item() - 0.2995) < 1e-3
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
a_ = self.full_loop(set_alpha_to_one=a , beta_start=0.01)
a_ = torch.sum(torch.abs(a))
a_ = torch.mean(torch.abs(a))
assert abs(result_sum.item() - 186.9482) < 1e-2
assert abs(result_mean.item() - 0.2434) < 1e-3
| 685 | 1 |
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
@staticmethod
def _lowerCAmelCase ( *a: Optional[Any] , **a: Tuple) ->int:
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@require_torch
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
a_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
a_ = image_classifier(a , candidate_labels=["a", "b", "c"])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(a) , [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
] , )
a_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2)
self.assertEqual(
nested_simplify(a) , [
[
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
],
[
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
],
[
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
],
[
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
],
[
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
],
] , )
@require_tf
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
a_ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf")
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
a_ = image_classifier(a , candidate_labels=["a", "b", "c"])
self.assertEqual(
nested_simplify(a) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )
a_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2)
self.assertEqual(
nested_simplify(a) , [
[
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
],
[
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
],
[
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
],
[
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
],
[
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
{"score": 0.333, "label": ANY(a)},
],
] , )
@slow
@require_torch
def _lowerCAmelCase ( self: int) ->Union[str, Any]:
'''simple docstring'''
a_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
a_ = image_classifier(a , candidate_labels=["cat", "plane", "remote"])
self.assertEqual(
nested_simplify(a) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
a_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2)
self.assertEqual(
nested_simplify(a) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def _lowerCAmelCase ( self: Tuple) ->Union[str, Any]:
'''simple docstring'''
a_ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf")
# This is an image of 2 cats with remotes and no planes
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
a_ = image_classifier(a , candidate_labels=["cat", "plane", "remote"])
self.assertEqual(
nested_simplify(a) , [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
] , )
a_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2)
self.assertEqual(
nested_simplify(a) , [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5 , )
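Outside the test harness, the slow-test scenario above reduces to a few lines; the image path is the fixture used by the tests and is illustrative here.

from PIL import Image
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# Returns a list of {"score": ..., "label": ...} dicts sorted by score.
print(classifier(image, candidate_labels=["cat", "plane", "remote"]))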
| 685 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Optional[int]) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "bird"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png")
a_ = pipe.prepare_image_inputs([canny_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def _lowerCAmelCase ( self: Union[str, Any]) ->str:
'''simple docstring'''
a_ , a_ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=a , dtype=jnp.bfloataa)
a_ , a_ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=a , from_pt=a , dtype=jnp.bfloataa)
a_ = controlnet_params
a_ = "Chef in the kitchen"
a_ = jax.device_count()
a_ = pipe.prepare_text_inputs([prompts] * num_samples)
a_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png")
a_ = pipe.prepare_image_inputs([pose_image] * num_samples)
a_ = jax.random.PRNGKey(0)
a_ = jax.random.split(a , jax.device_count())
a_ = replicate(a)
a_ = shard(a)
a_ = shard(a)
a_ = pipe(
prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=50 , jit=a , ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
a_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
a_ = images[0, 2_53:2_56, 2_53:2_56, -1]
a_ = jnp.asarray(jax.device_get(image_slice.flatten()))
a_ = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]])
print(f"""output_slice: {output_slice}""")
assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 1 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def __UpperCAmelCase () -> List[Any]:
'''simple docstring'''
a_ = torch.nn.Linear(2 ,4 )
a_ = torch.optim.AdamW(model.parameters() ,lr=1.0 )
a_ = torch.optim.lr_scheduler.OneCycleLR(lowercase__ ,max_lr=0.01 ,steps_per_epoch=2 ,epochs=1 )
a_ = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
a_ = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def __UpperCAmelCase (lowercase__ ) -> int:
'''simple docstring'''
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def __UpperCAmelCase (lowercase__ ) -> Any:
'''simple docstring'''
a_ = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(lowercase__ )
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
@require_cuda
def _lowerCAmelCase ( self: Tuple) ->Union[str, Any]:
'''simple docstring'''
a_ = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(a):
a_ = Accelerator(cpu=a)
def _lowerCAmelCase ( self: List[str]) ->Any:
'''simple docstring'''
a_ = Accelerator()
a_ = GradientState()
assert state.num_steps == 1
a_ = 4
assert state.num_steps == 4
assert state.sync_gradients is True
a_ = False
assert state.sync_gradients is False
GradientState._reset_state()
def _lowerCAmelCase ( self: Union[str, Any]) ->int:
'''simple docstring'''
a_ = Accelerator()
a_ , a_ , a_ , a_ , a_ = create_components()
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) = accelerator.prepare(a , a , a , a , a)
self.assertTrue(prepared_model in accelerator._models)
self.assertTrue(prepared_optimizer in accelerator._optimizers)
self.assertTrue(prepared_scheduler in accelerator._schedulers)
self.assertTrue(prepared_train_dl in accelerator._dataloaders)
self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
def _lowerCAmelCase ( self: Tuple) ->int:
'''simple docstring'''
a_ = Accelerator()
a_ , a_ , a_ , a_ , a_ = create_components()
accelerator.prepare(a , a , a , a , a)
accelerator.free_memory()
self.assertTrue(len(accelerator._models) == 0)
self.assertTrue(len(accelerator._optimizers) == 0)
self.assertTrue(len(accelerator._schedulers) == 0)
self.assertTrue(len(accelerator._dataloaders) == 0)
def _lowerCAmelCase ( self: Tuple) ->Union[str, Any]:
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a: Optional[Any] , **a: str):
pass
with patch("torch.cuda.set_device" , a), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64"):
a_ = Accelerator()
self.assertEqual(str(accelerator.state.device) , "cuda:64")
def _lowerCAmelCase ( self: Tuple) ->str:
'''simple docstring'''
a_ = Accelerator()
a_ , a_ , a_ , a_ , a_ = create_components()
accelerator.prepare(a , a , a , a , a)
a_ = get_signature(a)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a)
# make sure random weights don't match
load_random_weights(a)
self.assertTrue(abs(model_signature - get_signature(a)) > 1e-3)
# make sure loaded weights match
accelerator.load_state(a)
self.assertTrue(abs(model_signature - get_signature(a)) < 1e-3)
def _lowerCAmelCase ( self: str) ->Optional[int]:
'''simple docstring'''
a_ = Accelerator()
a_ , a_ , a_ , a_ , a_ = create_components()
accelerator.prepare(a , a , a , a , a)
a_ = get_signature(a)
# saving hook
def save_config(a: Dict , a: List[Any] , a: List[str]):
a_ = {"class_name": models[0].__class__.__name__}
with open(os.path.join(a , "data.json") , "w") as f:
json.dump(a , a)
# loading hook
def load_config(a: Union[str, Any] , a: Tuple):
with open(os.path.join(a , "data.json") , "r") as f:
a_ = json.load(a)
a_ = config["class_name"]
a_ = accelerator.register_save_state_pre_hook(a)
a_ = accelerator.register_load_state_pre_hook(a)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a)
# make sure random weights don't match with hooks
load_random_weights(a)
self.assertTrue(abs(model_signature - get_signature(a)) > 1e-3)
# random class name to verify correct one is loaded
a_ = "random"
# make sure loaded weights match with hooks
accelerator.load_state(a)
self.assertTrue(abs(model_signature - get_signature(a)) < 1e-3)
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__)
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a)
# make sure random weights don't match with hooks removed
load_random_weights(a)
self.assertTrue(abs(model_signature - get_signature(a)) > 1e-3)
# random class name to verify correct one is loaded
a_ = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(a)
self.assertTrue(abs(model_signature - get_signature(a)) < 1e-3)
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__)
def _lowerCAmelCase ( self: int) ->Optional[Any]:
'''simple docstring'''
a_ = Accelerator()
a_ , a_ , a_ , a_ , a_ = create_components()
a_ = None
# This should work
a_ , a_ , a_ , a_ , a_ , a_ = accelerator.prepare(
a , a , a , a , a , a)
self.assertTrue(dummy_obj is None)
def _lowerCAmelCase ( self: Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
a_ = Accelerator()
a_ , a_ , a_ , a_ , a_ = create_components()
a_ = [1, 2, 3]
# This should work
a_ , a_ , a_ , a_ , a_ , a_ = accelerator.prepare(
a , a , a , a , a , a)
self.assertEqual(
getattr(a , "_is_accelerate_prepared" , a) , a , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(a , "_is_accelerate_prepared" , a) , a , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(a , "_is_accelerate_prepared" , a) , a , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(a , "_is_accelerate_prepared" , a) , a , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(a , "_is_accelerate_prepared" , a) , a , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(a , "_is_accelerate_prepared" , a) , a , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def _lowerCAmelCase ( self: Optional[int]) ->List[str]:
'''simple docstring'''
from transformers import AutoModelForCausalLM
a_ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=a , device_map={"": 0} , )
a_ = Accelerator()
# This should work
a_ = accelerator.prepare(a)
@slow
@require_bnb
def _lowerCAmelCase ( self: Optional[int]) ->str:
'''simple docstring'''
from transformers import AutoModelForCausalLM
a_ = Accelerator()
with init_empty_weights():
a_ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
a_ = infer_auto_device_map(a)
a_ = "cpu"
a_ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=a , load_in_abit=a , llm_inta_enable_fpaa_cpu_offload=a)
# This should not work and get value error
with self.assertRaises(a):
a_ = accelerator.prepare(a)
@slow
@require_bnb
@require_multi_gpu
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
from transformers import AutoModelForCausalLM
a_ = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
a_ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
a_ = infer_auto_device_map(a)
a_ = 1
a_ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=a , device_map=a , )
a_ = Accelerator()
# This should not work and get value error
with self.assertRaises(a):
a_ = accelerator.prepare(a)
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
a_ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
a_ = infer_auto_device_map(a)
a_ = 1
a_ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=a , device_map=a , )
a_ = Accelerator()
# This should work
a_ = accelerator.prepare(a)
@require_cuda
def _lowerCAmelCase ( self: List[Any]) ->Optional[Any]:
'''simple docstring'''
a_ = torch.nn.Linear(10 , 10)
a_ = torch.optim.SGD(model.parameters() , lr=0.01)
a_ = Accelerator(cpu=a)
a_ = accelerator.prepare(a)
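The prepare/round-trip pattern these tests exercise, stripped to a minimal sketch (cpu=True keeps it hardware-agnostic):

import torch
from accelerate import Accelerator

accelerator = Accelerator(cpu=True)
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
model, optimizer = accelerator.prepare(model, optimizer)

with torch.no_grad():
    print(model(torch.randn(3, 2)).shape)  # torch.Size([3, 4])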
| 685 |
'''simple docstring'''
def solution(limit = 1000 ) -> int:
    '''simple docstring'''
    return sum(e for e in range(3 ,limit ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F'{solution() = }')
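The same sum has an O(1) closed form by inclusion-exclusion over multiples of 3, 5 and 15, worth knowing for very large limits; a sketch:

def solution_closed_form(limit: int = 1000) -> int:
    def multiples_sum(k: int) -> int:
        m = (limit - 1) // k            # count of multiples of k below limit
        return k * m * (m + 1) // 2     # k times a triangular number
    return multiples_sum(3) + multiples_sum(5) - multiples_sum(15)

assert solution_closed_form(10) == solution(10) == 23   # 3 + 5 + 6 + 9
assert solution_closed_form() == solution() == 233_168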
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number ) -> bool:
    '''simple docstring'''
    sq = int(number**0.5 )
return number == sq * sq
def add_three(x_num ,x_den ,y_num ,y_den ,z_num ,z_den ) -> tuple[int, int]:
    '''simple docstring'''
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top ,bottom )
top //= hcf
bottom //= hcf
return top, bottom
def solution(order = 35 ) -> int:
    '''simple docstring'''
    unique_s = set()
    total = Fraction(0 )
    for x_num in range(1 ,order + 1 ):
        for x_den in range(x_num + 1 ,order + 1 ):
            for y_num in range(1 ,order + 1 ):
                for y_den in range(y_num + 1 ,order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num ,z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        term = add_three(
                            x_num ,x_den ,y_num ,y_den ,z_num ,z_den )
                        unique_s.add(term )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num ,z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            term = add_three(
                                x_num ,x_den ,y_num ,y_den ,z_num ,z_den )
                            unique_s.add(term )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num ,z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        term = add_three(
                            x_num ,x_den ,y_num ,y_den ,z_num ,z_den )
                        unique_s.add(term )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num ,z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            term = add_three(
                                x_num ,x_den ,y_num ,y_den ,z_num ,z_den )
                            unique_s.add(term )
    for num, den in unique_s:
        total += Fraction(num ,den )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F'{solution() = }')
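A quick check on the reduction helper: three fractions that sum to one reduce to (1, 1).

# 1/2 + 1/3 + 1/6 == 1/1, so the reduced (numerator, denominator) is (1, 1).
assert add_three(1, 2, 1, 3, 1, 6) == (1, 1)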
| 685 |
'''simple docstring'''
import math
def prime_sieve(n ) -> list:
    '''simple docstring'''
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 ,int(n**0.5 + 1 ) ,2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 ,n ,2 ):
        if is_prime[i]:
            primes.append(i )
return primes
def solution(limit = 999966663333 ) -> int:
    '''simple docstring'''
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 100
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
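Two small checks on the odd-only sieve above (it returns the primes strictly below n):

assert prime_sieve(10) == [2, 3, 5, 7]
assert prime_sieve(30)[-1] == 29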
| 685 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
a_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: Optional[Any] , *a: int , **a: Optional[int]) ->None:
'''simple docstring'''
warnings.warn(
"The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use OwlViTImageProcessor instead." , a , )
super().__init__(*a , **a)
| 685 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
a_ = UniSpeechSatForSequenceClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["projector.weight"]
a_ = downstream_dict["projector.bias"]
a_ = downstream_dict["model.post_net.linear.weight"]
a_ = downstream_dict["model.post_net.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Dict:
'''simple docstring'''
a_ = UniSpeechSatForAudioFrameClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["model.linear.weight"]
a_ = downstream_dict["model.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[Any]:
'''simple docstring'''
a_ = UniSpeechSatForXVector.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["connector.weight"]
a_ = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
a_ = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
a_ = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
a_ = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> List[str]:
'''simple docstring'''
a_ = torch.load(lowercase__ ,map_location="cpu" )
a_ = checkpoint["Downstream"]
a_ = UniSpeechSatConfig.from_pretrained(lowercase__ )
a_ = WavaVecaFeatureExtractor.from_pretrained(
lowercase__ ,return_attention_mask=lowercase__ ,do_normalize=lowercase__ )
a_ = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
a_ = convert_classification(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForAudioFrameClassification" ):
a_ = convert_diarization(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForXVector" ):
a_ = convert_xvector(lowercase__ ,lowercase__ ,lowercase__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
a_ = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
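The conversion above follows one recipe: load the s3prl checkpoint, then copy each downstream tensor onto the matching parameter of a freshly instantiated HF model. A generic sketch of that copy step, with hypothetical key names:

import torch

def copy_linear(hf_linear: torch.nn.Linear, state: dict, prefix: str) -> None:
    # Direct tensor assignment, mirroring the field-by-field copies above.
    hf_linear.weight.data = state[f"{prefix}.weight"]
    hf_linear.bias.data = state[f"{prefix}.bias"]

layer = torch.nn.Linear(4, 2)
fake_ckpt = {"projector.weight": torch.randn(2, 4), "projector.bias": torch.randn(2)}
copy_linear(layer, fake_ckpt, "projector")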
| 685 | 1 |
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__=None ,lowercase__=None ,lowercase__=None ,lowercase__=None ,lowercase__=None ,) -> Dict:
'''simple docstring'''
if attention_mask is None:
a_ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
a_ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
a_ = torch.ones(config.encoder_layers ,config.encoder_attention_heads ,device=lowercase__ )
if decoder_head_mask is None:
a_ = torch.ones(config.decoder_layers ,config.decoder_attention_heads ,device=lowercase__ )
if cross_attn_head_mask is None:
a_ = torch.ones(config.decoder_layers ,config.decoder_attention_heads ,device=lowercase__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class SCREAMING_SNAKE_CASE__ :
def __init__( self: Any , a: int , a: Union[str, Any]=13 , a: Any=7 , a: Optional[int]=True , a: Tuple=False , a: List[str]=99 , a: Any=16 , a: Dict=2 , a: List[Any]=4 , a: int=4 , a: Optional[Any]="relu" , a: Optional[int]=0.1 , a: List[Any]=0.1 , a: List[Any]=0.0 , a: List[Any]=0.0 , a: List[Any]=20 , a: Tuple=2 , a: Optional[int]=1 , a: Any=0 , ) ->Any:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = encoder_layerdrop
a_ = decoder_layerdrop
a_ = max_position_embeddings
a_ = eos_token_id
a_ = pad_token_id
a_ = bos_token_id
def _lowerCAmelCase ( self: int) ->List[str]:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = self.eos_token_id # Eos Token
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
a_ = input_ids.clamp(self.pad_token_id + 1)
a_ = decoder_input_ids.clamp(self.pad_token_id + 1)
a_ = self.get_config()
a_ = prepare_mam_aaa_inputs_dict(a , a , a)
return config, inputs_dict
def _lowerCAmelCase ( self: Tuple) ->Optional[int]:
'''simple docstring'''
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def _lowerCAmelCase ( self: Any) ->Tuple:
'''simple docstring'''
a_ , a_ = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCAmelCase ( self: Union[str, Any] , a: int , a: Any) ->int:
'''simple docstring'''
a_ = MaMaaaModel(config=a).get_decoder().to(a).eval()
a_ = inputs_dict["input_ids"]
a_ = inputs_dict["attention_mask"]
a_ = inputs_dict["head_mask"]
# first forward pass
a_ = model(a , attention_mask=a , head_mask=a , use_cache=a)
a_ , a_ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
a_ = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and
a_ = torch.cat([input_ids, next_tokens] , dim=-1)
a_ = torch.cat([attention_mask, next_attn_mask] , dim=-1)
a_ = model(a , attention_mask=a)["last_hidden_state"]
a_ = model(a , attention_mask=a , past_key_values=a)[
"last_hidden_state"
]
# select random slice
a_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
a_ = output_from_no_past[:, -3:, random_slice_idx].detach()
a_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1e-2))
def _lowerCAmelCase ( self: Dict , a: int , a: Tuple) ->Any:
'''simple docstring'''
a_ = MaMaaaModel(config=a).to(a).eval()
a_ = model(**a)
a_ = outputs.encoder_last_hidden_state
a_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
a_ = model.get_encoder()
encoder.save_pretrained(a)
a_ = MaMaaaEncoder.from_pretrained(a).to(a)
a_ = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
a_ = model.get_decoder()
decoder.save_pretrained(a)
a_ = MaMaaaDecoder.from_pretrained(a).to(a)
a_ = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=a , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(MaMaaaForConditionalGeneration,) if is_torch_available() else ()
_UpperCAmelCase =(
{
'''conversational''': MaMaaaForConditionalGeneration,
'''feature-extraction''': MaMaaaModel,
'''summarization''': MaMaaaForConditionalGeneration,
'''text2text-generation''': MaMaaaForConditionalGeneration,
'''translation''': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
_UpperCAmelCase =True
_UpperCAmelCase =True
_UpperCAmelCase =False
_UpperCAmelCase =False
def _lowerCAmelCase ( self: Dict , a: Dict , a: Optional[int] , a: Tuple , a: List[str] , a: str) ->int:
'''simple docstring'''
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = MaMaaaModelTester(self)
a_ = ConfigTester(self , config_class=a)
def _lowerCAmelCase ( self: int) ->Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
a_ = model_class(a)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a)
a_ , a_ = model_class.from_pretrained(a , output_loading_info=a)
self.assertEqual(info["missing_keys"] , [])
def _lowerCAmelCase ( self: Any) ->int:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*a)
def _lowerCAmelCase ( self: Tuple) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*a)
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
a_ = model_class(a)
model.to(a)
model.eval()
a_ = copy.deepcopy(self._prepare_for_class(a , a))
if not self.is_encoder_decoder:
a_ = inputs["input_ids"]
del inputs["input_ids"]
else:
a_ = inputs["input_ids"]
a_ = inputs.get("decoder_input_ids" , a)
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , a)
a_ = model.get_input_embeddings()
if not self.is_encoder_decoder:
a_ = wte(a)
else:
a_ = wte(a)
a_ = wte(a)
with torch.no_grad():
model(**a)[0]
def _lowerCAmelCase ( self: int) ->List[Any]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs()
a_ = input_dict["input_ids"]
a_ = input_ids.ne(1).to(a)
a_ = MaMaaaForConditionalGeneration(a).eval().to(a)
if torch_device == "cuda":
model.half()
model.generate(a , attention_mask=a)
model.generate(num_beams=4 , do_sample=a , early_stopping=a , num_return_sequences=3)
def __UpperCAmelCase (lowercase__ ) -> Union[str, Any]:
'''simple docstring'''
return torch.tensor(lowercase__ ,dtype=torch.long ,device=lowercase__ )
a_ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def _lowerCAmelCase ( self: Optional[int]) ->str:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")
def _lowerCAmelCase ( self: str) ->List[str]:
'''simple docstring'''
a_ = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(a)
a_ = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]])
a_ = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]])
a_ = prepare_mam_aaa_inputs_dict(model.config , a , a)
with torch.no_grad():
a_ = model(**a)[0]
a_ = torch.Size((1, 11, 10_24))
self.assertEqual(output.shape , a)
# change to expected output here
a_ = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=a)
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=a))
def _lowerCAmelCase ( self: Optional[int]) ->int:
'''simple docstring'''
a_ = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(a)
# change to intended input
a_ = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]])
a_ = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]])
a_ = prepare_mam_aaa_inputs_dict(model.config , a , a)
with torch.no_grad():
a_ = model(**a)[0]
a_ = torch.Size((1, 11, model.config.vocab_size))
self.assertEqual(output.shape , a)
# change to expected output here
a_ = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=a)
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=a))
def _lowerCAmelCase ( self: List[str]) ->Dict:
'''simple docstring'''
a_ = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(a)
a_ = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en")
a_ = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
a_ = tokenizer(a , padding=a , return_tensors="pt")
a_ = model.generate(
input_ids=dct["input_ids"].to(a) , attention_mask=dct["attention_mask"].to(a) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en") , )
a_ = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
a_ = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=a , skip_special_tokens=a)
assert generated == expected_en
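Outside the harness, the translation scenario in the last test reduces to the sketch below (same public checkpoint; the source sentence is taken from the test).

from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

batch = tokenizer(["L'affaire NSA souligne l'absence totale de débat sur le renseignement"], return_tensors="pt")
generated = model.generate(**batch, num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))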
| 685 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes ,partitions ) -> list[str]:
    '''simple docstring'''
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!" )
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(F"""{start_bytes}-{end_bytes}""" )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
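For example, 16 bytes over 4 partitions gives four even ranges, while any remainder is absorbed by the last partition:

assert allocation_num(16, 4) == ["1-4", "5-8", "9-12", "13-16"]
assert allocation_num(10, 3) == ["1-3", "4-6", "7-10"]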
| 685 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
a_ = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
a_ = {
'abeja/gpt-neox-japanese-2.7b': 2_048,
}
def __UpperCAmelCase (lowercase__ ,lowercase__ ) -> Tuple:
'''simple docstring'''
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = json.loads(f.read() )
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
a_ = collections.OrderedDict()
with open(lowercase__ ,"r" ,encoding="utf-8" ) as f:
a_ = f.readlines()
a_ = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(lowercase__ ):
a_ = b
a_ = idx
for wd in b:
a_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase =VOCAB_FILES_NAMES
_UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase =['''input_ids''', '''attention_mask''']
def __init__( self: List[str] , a: Union[str, Any] , a: Optional[int] , a: List[str]="<|endoftext|>" , a: Union[str, Any]="<|endoftext|>" , a: Dict="<|startoftext|>" , a: Dict="<|endoftext|>" , a: Union[str, Any]=False , **a: Optional[int] , ) ->str:
'''simple docstring'''
super().__init__(
unk_token=a , pad_token=a , bos_token=a , eos_token=a , do_clean_text=a , **a , )
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
if not os.path.isfile(a):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
a_ = do_clean_text
a_ , a_ , a_ , a_ = load_vocab_and_emoji(a , a)
a_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)
@property
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return len(self.raw_vocab)
def _lowerCAmelCase ( self: Dict) ->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder)
def _lowerCAmelCase ( self: Union[str, Any] , a: Any) ->Dict:
'''simple docstring'''
return self.subword_tokenizer.tokenize(a , clean=self.do_clean_text)
def _lowerCAmelCase ( self: int , a: List[Any]) ->Union[str, Any]:
'''simple docstring'''
return self.vocab.get(a , self.vocab.get(self.unk_token))
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(a)
def _lowerCAmelCase ( self: Optional[int] , a: Any) ->str:
'''simple docstring'''
a_ = "".join(a).strip()
return out_string
def _lowerCAmelCase ( self: Any , a: "Conversation") ->List[int]:
'''simple docstring'''
a_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a) + [self.eos_token_id])
if len(a) > self.model_max_length:
a_ = input_ids[-self.model_max_length :]
return input_ids
def _lowerCAmelCase ( self: int , a: str , a: Optional[str] = None) ->Tuple[str]:
'''simple docstring'''
a_ = 0
if os.path.isdir(a):
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
a_ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
else:
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
a_ = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(a , "w" , encoding="utf-8") as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
" Please check that the vocabulary is not corrupted!")
a_ = token_index
writer.write(",".join(a) + "\n")
index += 1
with open(a , "w" , encoding="utf-8") as writer:
json.dump(self.emoji , a)
return vocab_file, emoji_file
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[str] , a: Any , a: Union[str, Any] , a: Any) ->List[Any]:
'''simple docstring'''
a_ = vocab # same as swe
a_ = ids_to_tokens # same as bpe
a_ = emoji
a_ = np.max([len(a) for w in self.vocab.keys()])
a_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
a_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
a_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
a_ = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
a_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
a_ = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
a_ = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
a_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self: Dict) ->Any:
'''simple docstring'''
return len(self.ids_to_tokens)
def _lowerCAmelCase ( self: Union[str, Any] , a: Tuple) ->Any:
'''simple docstring'''
a_ = self.content_repattera.sub("<URL>" , a)
a_ = self.content_repattera.sub("<EMAIL>" , a)
a_ = self.content_repattera.sub("<TEL>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<DATE>" , a)
a_ = self.content_repattera.sub("<PRICE>" , a)
a_ = content.translate(self.content_transa)
while "<BLOCK><BLOCK>" in content:
a_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
return content
def _lowerCAmelCase ( self: Any , a: int , a: Optional[int]=False) ->List[str]:
'''simple docstring'''
a_ = text.replace(" " , "<SP>")
a_ = text.replace(" " , "<SP>")
a_ = text.replace("\r\n" , "<BR>")
a_ = text.replace("\n" , "<BR>")
a_ = text.replace("\r" , "<BR>")
a_ = text.replace("\t" , "<TAB>")
a_ = text.replace("—" , "ー")
a_ = text.replace("−" , "ー")
for k, v in self.emoji["emoji"].items():
if k in text:
a_ = text.replace(a , a)
if clean:
a_ = self.clean_text(a)
def check_simbol(a: Dict):
a_ = x.encode()
if len(a) == 1 and len(a) == 2:
a_ = (int(e[0]) << 8) + int(e[1])
if (
(c >= 0XC_2_A_1 and c <= 0XC_2_B_F)
or (c >= 0XC_7_8_0 and c <= 0XC_7_8_3)
or (c >= 0XC_A_B_9 and c <= 0XC_B_B_F)
or (c >= 0XC_C_8_0 and c <= 0XC_D_A_2)
):
return True
return False
def checkuae(a: str):
a_ = x.encode()
if len(a) == 1 and len(a) == 3:
a_ = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
if c >= 0XE_2_8_0_8_0 and c <= 0XE_2_B_0_7_F:
return True
return False
a_ = 0
a_ = []
while pos < len(a):
a_ = min(len(a) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
a_ = [] # (token_id, token, pos)
for e in range(a , a , -1):
a_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(a) > 2:
a_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e))
if len(a) > 0:
# the smallest token_id is adopted
a_ , a_ , a_ = sorted(a , key=lambda a: x[0])[0]
result.append(a)
a_ = e
else:
a_ = pos + 1
a_ = text[pos:end]
if check_simbol(a):
result.append("<KIGOU>")
elif checkuae(a):
result.append("<U2000U2BFF>")
else:
for i in wd.encode("utf-8"):
result.append("<|byte%d|>" % i)
a_ = end
return result
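    # Worked sketch of the candidate selection above (hypothetical vocab): with
    # vocab = {"a": 2, "ab": 5, "abc": 9}, the lookahead windows over "abc" are
    # "abc", "ab", "a"; all three become candidates and the SMALLEST token id
    # wins, so "a" (id 2) is emitted first rather than the longest match.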
def _lowerCAmelCase ( self: int , a: List[Any] , a: Any="\n") ->str:
'''simple docstring'''
a_ = []
a_ = []
a_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2]))
else:
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word])
elif word == "<SP>":
words.append(" ")
elif word == "<BR>":
words.append(a)
elif word == "<TAB>":
words.append("\t")
elif word == "<BLOCK>":
words.append("▀")
elif word == "<KIGOU>":
words.append("ǀ")
elif word == "<U2000U2BFF>":
words.append("‖")
else:
words.append(a)
if len(a) > 0:
words.append(bytearray(a).decode("utf-8" , errors="replace"))
a_ = "".join(a)
return text
| 685 | 1 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ ):
_UpperCAmelCase ='''pixel_values'''
_UpperCAmelCase =False
_UpperCAmelCase =TimmBackboneConfig
def __init__( self: Union[str, Any] , a: Union[str, Any] , **a: Tuple) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , "timm")
super().__init__(a)
a_ = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
if hasattr(a , "out_features") and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
a_ = getattr(a , "use_pretrained_backbone" , a)
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
# We just take the final layer by default. This matches the default for the transformers models.
a_ = config.out_indices if getattr(a , "out_indices" , a) is not None else (-1,)
a_ = timm.create_model(
config.backbone , pretrained=a , features_only=config.features_only , in_chans=config.num_channels , out_indices=a , **a , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
a_ = self._backbone.return_layers
a_ = {layer["module"]: str(a) for i, layer in enumerate(self._backbone.feature_info.info)}
super()._init_backbone(a)
@classmethod
def _lowerCAmelCase ( cls: Tuple , a: Optional[Any] , *a: Optional[Any] , **a: str) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"])
from ...models.timm_backbone import TimmBackboneConfig
a_ = kwargs.pop("config" , TimmBackboneConfig())
a_ = kwargs.pop("use_timm_backbone" , a)
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones")
a_ = kwargs.pop("num_channels" , config.num_channels)
a_ = kwargs.pop("features_only" , config.features_only)
a_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone)
a_ = kwargs.pop("out_indices" , config.out_indices)
a_ = TimmBackboneConfig(
backbone=a , num_channels=a , features_only=a , use_pretrained_backbone=a , out_indices=a , )
return super()._from_config(a , **a)
def _lowerCAmelCase ( self: Optional[Any] , a: Optional[int]) ->str:
'''simple docstring'''
pass
def _lowerCAmelCase ( self: Tuple , a: List[Any] , a: Any=None , a: Dict=None , a: Optional[int]=None , **a: int) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
a_ = return_dict if return_dict is not None else self.config.use_return_dict
a_ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a_ = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment")
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
a_ = self._all_layers
a_ = self._backbone(a , **a)
a_ = self._return_layers
a_ = tuple(hidden_states[i] for i in self.out_indices)
else:
a_ = self._backbone(a , **a)
a_ = None
a_ = tuple(a)
a_ = tuple(a) if hidden_states is not None else None
if not return_dict:
a_ = (feature_maps,)
if output_hidden_states:
a_ = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=a , hidden_states=a , attentions=a)
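# Hypothetical usage sketch (backbone name and indices are placeholders; timm
# must be installed for this to run):
#   config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
#   model = SCREAMING_SNAKE_CASE__(config)
#   feature_maps = model(pixel_values).feature_maps  # one tensor per out_index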
| 685 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
def __init__( self: List[Any] , a: Optional[Any] , a: Dict=13 , a: List[str]=7 , a: Optional[Any]=True , a: int=True , a: Any=True , a: Optional[int]=True , a: int=True , a: Dict=False , a: Union[str, Any]=False , a: Dict=False , a: List[str]=2 , a: Union[str, Any]=99 , a: List[Any]=0 , a: Optional[int]=32 , a: List[str]=5 , a: int=4 , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Optional[int]=5_12 , a: str=12 , a: Dict=2 , a: Any=0.02 , a: Optional[int]=3 , a: str=4 , a: Optional[int]="last" , a: Tuple=None , a: Any=None , ) ->int:
'''simple docstring'''
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_lengths
a_ = use_token_type_ids
a_ = use_labels
a_ = gelu_activation
a_ = sinusoidal_embeddings
a_ = causal
a_ = asm
a_ = n_langs
a_ = vocab_size
a_ = n_special
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = num_choices
a_ = summary_type
a_ = use_proj
a_ = scope
def _lowerCAmelCase ( self: Tuple) ->Dict:
'''simple docstring'''
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = random_attention_mask([self.batch_size, self.seq_length])
a_ = None
if self.use_input_lengths:
a_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a_ = None
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = ids_tensor([self.batch_size] , 2).float()
a_ = ids_tensor([self.batch_size] , self.num_choices)
a_ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: List[Any] , a: List[Any] , a: Optional[int] , a: int , a: str , a: Any , a: str , a: List[Any] , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModel(config=a)
model.to(a)
model.eval()
a_ = model(a , lengths=a , langs=a)
a_ = model(a , langs=a)
a_ = model(a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCAmelCase ( self: Optional[int] , a: Optional[Any] , a: Dict , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Any , a: Tuple , a: str , a: List[str] , ) ->Dict:
'''simple docstring'''
a_ = FlaubertWithLMHeadModel(a)
model.to(a)
model.eval()
a_ = model(a , token_type_ids=a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCAmelCase ( self: Optional[int] , a: Tuple , a: Optional[Any] , a: List[Any] , a: List[str] , a: List[str] , a: List[str] , a: Optional[Any] , a: str , a: Union[str, Any] , ) ->str:
'''simple docstring'''
a_ = FlaubertForQuestionAnsweringSimple(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , start_positions=a , end_positions=a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Optional[Any] , a: Any , a: Dict , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , ) ->int:
'''simple docstring'''
a_ = FlaubertForQuestionAnswering(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , p_mask=a , )
a_ = model(
a , start_positions=a , end_positions=a , cls_index=a , is_impossible=a , )
((a_) , ) = result_with_labels.to_tuple()
a_ = model(a , start_positions=a , end_positions=a)
((a_) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def _lowerCAmelCase ( self: Union[str, Any] , a: List[str] , a: Tuple , a: Union[str, Any] , a: Any , a: Tuple , a: Union[str, Any] , a: int , a: int , a: Dict , ) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertForSequenceClassification(a)
model.to(a)
model.eval()
a_ = model(a)
a_ = model(a , labels=a)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCAmelCase ( self: str , a: List[str] , a: Dict , a: Tuple , a: Optional[Any] , a: Any , a: Any , a: str , a: str , a: Optional[Any] , ) ->List[Any]:
'''simple docstring'''
a_ = self.num_labels
a_ = FlaubertForTokenClassification(a)
model.to(a)
model.eval()
a_ = model(a , attention_mask=a , labels=a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCAmelCase ( self: Dict , a: Tuple , a: List[Any] , a: Dict , a: Optional[Any] , a: Optional[Any] , a: Optional[Any] , a: Union[str, Any] , a: List[str] , a: Tuple , ) ->Dict:
'''simple docstring'''
a_ = self.num_choices
a_ = FlaubertForMultipleChoice(config=a)
model.to(a)
model.eval()
a_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a_ = model(
a , attention_mask=a , token_type_ids=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCAmelCase ( self: Any) ->List[Any]:
'''simple docstring'''
a_ = self.prepare_config_and_inputs()
(
(
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) , (
a_
) ,
) = config_and_inputs
a_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( lowercase_ , lowercase_ , unittest.TestCase ):
_UpperCAmelCase =(
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCAmelCase =(
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self: Optional[Any] , a: List[Any] , a: Any , a: List[str] , a: Union[str, Any] , a: int) ->int:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizers
return True
return False
def _lowerCAmelCase ( self: str , a: Optional[Any] , a: List[Any] , a: Tuple=False) ->List[Any]:
'''simple docstring'''
a_ = super()._prepare_for_class(a , a , return_labels=a)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
a_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a)
return inputs_dict
def _lowerCAmelCase ( self: Dict) ->Union[str, Any]:
'''simple docstring'''
a_ = FlaubertModelTester(self)
a_ = ConfigTester(self , config_class=a , emb_dim=37)
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self: List[str]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a)
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a)
def _lowerCAmelCase ( self: Optional[int]) ->Optional[Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a)
def _lowerCAmelCase ( self: Any) ->Optional[int]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a)
def _lowerCAmelCase ( self: Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a)
def _lowerCAmelCase ( self: List[Any]) ->Dict:
'''simple docstring'''
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a)
@slow
def _lowerCAmelCase ( self: Any) ->Any:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = FlaubertModel.from_pretrained(a)
self.assertIsNotNone(a)
@slow
@require_torch_gpu
def _lowerCAmelCase ( self: int) ->Optional[int]:
'''simple docstring'''
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
a_ = True
a_ = model_class(config=a)
a_ = self._prepare_for_class(a , a)
a_ = torch.jit.trace(
a , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a , os.path.join(a , "traced_model.pt"))
a_ = torch.jit.load(os.path.join(a , "traced_model.pt") , map_location=a)
loaded(inputs_dict["input_ids"].to(a) , inputs_dict["attention_mask"].to(a))
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
a_ = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
with torch.no_grad():
a_ = model(a)[0]
a_ = torch.Size((1, 11, 7_68))
self.assertEqual(output.shape , a)
a_ = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4))
| 685 | 1 |
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
a_ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
a_ = []
a_ = []
a_ = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
a_ = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': F'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
'emoji': True,
},
}
]
a_ = 0
for log in Path().glob('*.log'):
a_ = 0
with open(log, 'r') as f:
for line in f:
a_ = json.loads(line)
if line.get('nodeid', '') != "":
a_ = line['nodeid']
if line.get('duration', None) is not None:
a_ = F'{line["duration"]:.4f}'
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
a_ = []
log.unlink()
a_ = ''
a_ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
a_ = []
a_ = {}
for test in failed_tests:
a_ = test[0].split('::')
a_ = data[0].split('/')[-1]
if data[0] not in filesafailed:
a_ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
a_ = [test[0] for test in failed_table]
a_ = list(set(files))
# Count number of instances in failed_tests
a_ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
a_ = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
a_ = 'Too many failed tests, please see the full report in the Action results.'
a_ = len(err) + 10
a_ = message[: 3_000 - offset] + F'\n...\n```\n{err}'
print(F'### {message}')
else:
a_ = 'No failed tests! 🤗'
print(F'## {message}')
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
a_ = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
a_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
a_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': F'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
a_ = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': F'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
a_ = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
a_ = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
a_ = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
a_ = row[0]
else:
a_ = ''
a_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': F'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
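# Illustrative rendering assumption for the pipe-only TableFormat defined at the
# top of this script: tabulate emits Slack/markdown-style rows such as
#   | Test Location | Num Failed |
#   | test_foo.py   |          2 |
# (string columns right-aligned because stralign='right' is passed).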
| 685 |
'''simple docstring'''
import math
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(lowercase__ ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCAmelCase (lowercase__ = 10001 ) -> int:
'''simple docstring'''
try:
a_ = int(lowercase__ )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
a_ = []
a_ = 2
while len(lowercase__ ) < nth:
if is_prime(lowercase__ ):
primes.append(lowercase__ )
num += 1
else:
num += 1
return primes[len(lowercase__ ) - 1]
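# Sanity-check sketch for the helpers above: solution(6) returns 13 because the
# first six primes are 2, 3, 5, 7, 11, 13, and is_prime only has to trial-divide
# by 6k +/- 1 candidates up to sqrt(number).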
if __name__ == "__main__":
print(F'{solution() = }')
| 685 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
a_ = UniSpeechSatForSequenceClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["projector.weight"]
a_ = downstream_dict["projector.bias"]
a_ = downstream_dict["model.post_net.linear.weight"]
a_ = downstream_dict["model.post_net.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Dict:
'''simple docstring'''
a_ = UniSpeechSatForAudioFrameClassification.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["model.linear.weight"]
a_ = downstream_dict["model.linear.bias"]
return model
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ) -> Optional[Any]:
'''simple docstring'''
a_ = UniSpeechSatForXVector.from_pretrained(lowercase__ ,config=lowercase__ )
a_ = downstream_dict["connector.weight"]
a_ = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
a_ = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
a_ = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
a_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
a_ = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> List[str]:
'''simple docstring'''
a_ = torch.load(lowercase__ ,map_location="cpu" )
a_ = checkpoint["Downstream"]
a_ = UniSpeechSatConfig.from_pretrained(lowercase__ )
a_ = WavaVecaFeatureExtractor.from_pretrained(
lowercase__ ,return_attention_mask=lowercase__ ,do_normalize=lowercase__ )
a_ = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
a_ = convert_classification(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForAudioFrameClassification" ):
a_ = convert_diarization(lowercase__ ,lowercase__ ,lowercase__ )
elif arch.endswith("ForXVector" ):
a_ = convert_xvector(lowercase__ ,lowercase__ ,lowercase__ )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
a_ = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
a_ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
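# Hypothetical invocation sketch (every path below is a placeholder):
#   python convert_unispeech_sat.py \
#     --base_model_name microsoft/unispeech-sat-base \
#     --config_path ./classifier_config.json \
#     --checkpoint_path ./s3prl_checkpoint.pt \
#     --model_dump_path ./converted_model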
| 685 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class SCREAMING_SNAKE_CASE__ ( lowercase_ ):
_UpperCAmelCase ='''visual_bert'''
def __init__( self: Union[str, Any] , a: List[Any]=3_05_22 , a: List[Any]=7_68 , a: Union[str, Any]=5_12 , a: List[str]=12 , a: Tuple=12 , a: Optional[Any]=30_72 , a: int="gelu" , a: Union[str, Any]=0.1 , a: int=0.1 , a: str=5_12 , a: Optional[int]=2 , a: List[str]=0.02 , a: Optional[int]=1e-12 , a: str=False , a: Any=True , a: Tuple=1 , a: Dict=0 , a: Any=2 , **a: Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a)
a_ = vocab_size
a_ = max_position_embeddings
a_ = hidden_size
a_ = visual_embedding_dim
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = initializer_range
a_ = type_vocab_size
a_ = layer_norm_eps
a_ = bypass_transformer
a_ = special_visual_initialize
| 685 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self: int) ->str:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModel.from_pretrained(a , from_pt=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = AutoModel.from_pretrained(a , from_tf=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
def _lowerCAmelCase ( self: List[str]) ->Dict:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModelForPreTraining.from_pretrained(a , from_pt=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = AutoModelForPreTraining.from_pretrained(a , from_tf=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
def _lowerCAmelCase ( self: str) ->Tuple:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModelForCausalLM.from_pretrained(a , from_pt=a)
a_ , a_ = TFAutoModelForCausalLM.from_pretrained(
a , output_loading_info=a , from_pt=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = AutoModelForCausalLM.from_pretrained(a , from_tf=a)
a_ , a_ = AutoModelForCausalLM.from_pretrained(
a , output_loading_info=a , from_tf=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
def _lowerCAmelCase ( self: Any) ->int:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModelWithLMHead.from_pretrained(a , from_pt=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = AutoModelWithLMHead.from_pretrained(a , from_tf=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
def _lowerCAmelCase ( self: Tuple) ->str:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModelForMaskedLM.from_pretrained(a , from_pt=a)
a_ , a_ = TFAutoModelForMaskedLM.from_pretrained(
a , output_loading_info=a , from_pt=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = AutoModelForMaskedLM.from_pretrained(a , from_tf=a)
a_ , a_ = AutoModelForMaskedLM.from_pretrained(
a , output_loading_info=a , from_tf=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
def _lowerCAmelCase ( self: Union[str, Any]) ->int:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModelForSeqaSeqLM.from_pretrained(a , from_pt=a)
a_ , a_ = TFAutoModelForSeqaSeqLM.from_pretrained(
a , output_loading_info=a , from_pt=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = AutoModelForSeqaSeqLM.from_pretrained(a , from_tf=a)
a_ , a_ = AutoModelForSeqaSeqLM.from_pretrained(
a , output_loading_info=a , from_tf=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
def _lowerCAmelCase ( self: Dict) ->Tuple:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModelForSequenceClassification.from_pretrained(a , from_pt=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = AutoModelForSequenceClassification.from_pretrained(a , from_tf=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
@slow
def _lowerCAmelCase ( self: Optional[Any]) ->Tuple:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a_ = AutoConfig.from_pretrained(a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = TFAutoModelForQuestionAnswering.from_pretrained(a , from_pt=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
a_ = AutoModelForQuestionAnswering.from_pretrained(a , from_tf=a)
self.assertIsNotNone(a)
self.assertIsInstance(a , a)
def _lowerCAmelCase ( self: Dict) ->str:
'''simple docstring'''
a_ = TFAutoModelWithLMHead.from_pretrained(a , from_pt=a)
self.assertIsInstance(a , a)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=a) , 1_44_10)
a_ = AutoModelWithLMHead.from_pretrained(a , from_tf=a)
self.assertIsInstance(a , a)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=a) , 1_44_10)
def _lowerCAmelCase ( self: List[Any]) ->List[str]:
'''simple docstring'''
a_ = TFAutoModelWithLMHead.from_pretrained(a , from_pt=a)
self.assertIsInstance(a , a)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=a) , 1_44_10)
a_ = AutoModelWithLMHead.from_pretrained(a , from_tf=a)
self.assertIsInstance(a , a)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=a) , 1_44_10)
| 685 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
a_ , a_ = grid.shape
a_ = [-1, 1, 0, 0]
a_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
a_ , a_ = [(0, source)], set()
a_ = np.full((rows, cols) ,np.inf )
a_ = 0
a_ = np.empty((rows, cols) ,dtype=lowercase__ )
a_ = None
while queue:
((a_) , (a_)) = heappop(lowercase__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
a_ = []
while (x, y) != source:
path.append((x, y) )
a_ , a_ = predecessors[x, y]
path.append(lowercase__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(lowercase__ ) ):
a_ , a_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
a_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(lowercase__ ,(dist + 1, (nx, ny)) )
a_ = dist + 1
a_ = (x, y)
return np.inf, []
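# Minimal usage sketch (hypothetical grid; 1 = walkable, 0 = blocked):
#   grid = np.array([[1, 1], [0, 1]])
#   dist, path = __UpperCAmelCase(grid, (0, 0), (1, 1), False)
#   -> dist == 2.0 and path == [(0, 0), (0, 1), (1, 1)]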
if __name__ == "__main__":
import doctest
doctest.testmod()
| 685 | 1 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
if not isinstance(lowercase__ ,lowercase__ ):
raise ValueError("check_bouncy() accepts only integer arguments" )
a_ = str(lowercase__ )
a_ = "".join(sorted(lowercase__ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __UpperCAmelCase (lowercase__ = 99 ) -> int:
'''simple docstring'''
if not 0 < percent < 100:
raise ValueError("solution() only accepts values from 0 to 100" )
a_ = 0
a_ = 1
while True:
if check_bouncy(lowercase__ ):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'{solution(99)}')
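# Illustrative values (the first two follow directly from the definition; the
# percentage figures are the Project Euler 112 statement values):
#   check_bouncy(134468) -> False   # digits never decrease
#   check_bouncy(155349) -> True    # neither increasing nor decreasing
#   solution(50) -> 538, solution(90) -> 21780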
| 685 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase (lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) -> Any:
'''simple docstring'''
with open(lowercase__ ) as metadata_file:
a_ = json.load(lowercase__ )
a_ = LukeConfig(use_entity_aware_attention=lowercase__ ,**metadata["model_config"] )
# Load in the weights from the checkpoint_path
a_ = torch.load(lowercase__ ,map_location="cpu" )["module"]
# Load the entity vocab file
a_ = load_original_entity_vocab(lowercase__ )
# add an entry for [MASK2]
a_ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a_ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
a_ = AddedToken("<ent>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
a_ = AddedToken("<ent2>" ,lstrip=lowercase__ ,rstrip=lowercase__ )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"r" ) as f:
a_ = json.load(lowercase__ )
a_ = "MLukeTokenizer"
with open(os.path.join(lowercase__ ,"tokenizer_config.json" ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
with open(os.path.join(lowercase__ ,MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) ,"w" ) as f:
json.dump(lowercase__ ,lowercase__ )
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
a_ = tokenizer.convert_tokens_to_ids(["@"] )[0]
a_ = tokenizer.convert_tokens_to_ids(["#"] )[0]
a_ = state_dict["embeddings.word_embeddings.weight"]
a_ = word_emb[ent_init_index].unsqueeze(0 )
a_ = word_emb[enta_init_index].unsqueeze(0 )
a_ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a_ = state_dict[bias_name]
a_ = decoder_bias[ent_init_index].unsqueeze(0 )
a_ = decoder_bias[enta_init_index].unsqueeze(0 )
a_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a_ = F"""encoder.layer.{layer_index}.attention.self."""
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
a_ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ = state_dict["entity_embeddings.entity_embeddings.weight"]
a_ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a_ = state_dict["entity_predictions.bias"]
a_ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ = torch.cat([entity_prediction_bias, entity_mask_bias] )
a_ = LukeForMaskedLM(config=lowercase__ ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
a_ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
a_ = state_dict[key]
else:
a_ = state_dict[key]
a_ , a_ = model.load_state_dict(lowercase__ ,strict=lowercase__ )
if set(lowercase__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a_ = MLukeTokenizer.from_pretrained(lowercase__ ,task="entity_classification" )
a_ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
a_ = (0, 9)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 33, 768) )
a_ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ = torch.Size((1, 1, 768) )
a_ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,lowercase__ ,atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
a_ = MLukeTokenizer.from_pretrained(lowercase__ )
a_ = "Tokyo is the capital of <mask>."
a_ = (24, 30)
a_ = tokenizer(lowercase__ ,entity_spans=[span] ,return_tensors="pt" )
a_ = model(**lowercase__ )
a_ = encoding["input_ids"][0].tolist()
a_ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
a_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase__ )
a_ = outputs.entity_logits[0][0].argmax().item()
a_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def __UpperCAmelCase (lowercase__ ) -> Any:
'''simple docstring'''
a_ = ["[MASK]", "[PAD]", "[UNK]"]
a_ = [json.loads(lowercase__ ) for line in open(lowercase__ )]
a_ = {}
for entry in data:
a_ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
a_ = entity_id
break
a_ = F"""{language}:{entity_name}"""
a_ = entity_id
return new_mapping
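# Hypothetical sketch of one entity_vocab JSONL line consumed above:
#   {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}
# -> new_mapping["en:Japan"] == 3 and new_mapping["ja:日本"] == 3, while an
#    entry whose name is in SPECIAL_TOKENS is stored under the bare name.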
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 685 | 1 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ ) -> float:
'''simple docstring'''
if edge <= 0 or not isinstance(lowercase__ ,lowercase__ ):
raise ValueError("Length must be a positive." )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __UpperCAmelCase (lowercase__ ) -> float:
'''simple docstring'''
if edge <= 0 or not isinstance(lowercase__ ,lowercase__ ):
raise ValueError("Length must be a positive." )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
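# Numeric sketch for a unit edge (rounded; follows from the closed forms above):
#   surface area: 3 * sqrt(25 + 10 * sqrt(5)) ~= 20.6457
#   volume:       (15 + 7 * sqrt(5)) / 4      ~= 7.6631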
| 685 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =LxmertTokenizer
_UpperCAmelCase =LxmertTokenizerFast
_UpperCAmelCase =True
_UpperCAmelCase =True
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
super().setUp()
a_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _lowerCAmelCase ( self: Optional[Any] , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = "UNwant\u00E9d,running"
a_ = "unwanted, running"
return input_text, output_text
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = self.tokenizer_class(self.vocab_file)
a_ = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(a , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , [7, 4, 5, 10, 8, 9])
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer()
a_ = "I was born in 92000, and this is falsé."
a_ = tokenizer.tokenize(a)
a_ = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
a_ = tokenizer.encode(a , add_special_tokens=a)
a_ = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
a_ = self.get_rust_tokenizer()
a_ = tokenizer.encode(a)
a_ = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
| 685 | 1 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _lowerCAmelCase ( self: Optional[Any]) ->Optional[Any]:
'''simple docstring'''
a_ = "hf-internal-testing/tiny-random-t5"
a_ = AutoTokenizer.from_pretrained(a)
a_ = AutoModelForSeqaSeqLM.from_pretrained(a)
a_ = tokenizer("This is me" , return_tensors="pt")
a_ = model.to_bettertransformer()
self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
a_ = model.generate(**a)
a_ = model.reverse_bettertransformer()
self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a)
a_ = AutoModelForSeqaSeqLM.from_pretrained(a)
self.assertFalse(
any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
a_ = model_reloaded.generate(**a)
self.assertTrue(torch.allclose(a , a))
def _lowerCAmelCase ( self: List[Any]) ->Tuple:
'''simple docstring'''
a_ = "hf-internal-testing/tiny-random-t5"
a_ = AutoModelForSeqaSeqLM.from_pretrained(a)
a_ = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(a):
model.save_pretrained(a)
a_ = model.reverse_bettertransformer()
model.save_pretrained(a)
| 685 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a_ = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 685 | 1 |
'''simple docstring'''
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
a_ = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def __UpperCAmelCase (lowercase__ = 5000 ) -> int:
'''simple docstring'''
a_ = [(i * (3 * i - 1)) // 2 for i in range(1 ,lowercase__ )]
for i, pentagonal_i in enumerate(lowercase__ ):
for j in range(lowercase__ ,len(lowercase__ ) ):
a_ = pentagonal_nums[j]
a_ = pentagonal_i + pentagonal_j
a_ = pentagonal_j - pentagonal_i
if is_pentagonal(lowercase__ ) and is_pentagonal(lowercase__ ):
return b
return -1
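# Worked instance of the inverse test above: n is pentagonal iff
# (1 + sqrt(24 * n + 1)) / 6 is a positive integer, e.g. for n = 22,
# sqrt(24 * 22 + 1) = sqrt(529) = 23 and (1 + 23) / 6 = 4, so 22 = P_4.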
if __name__ == "__main__":
print(F'{solution() = }')
| 685 |
'''simple docstring'''
import re
def __UpperCAmelCase (lowercase__ ) -> bool:
'''simple docstring'''
a_ = re.compile(
r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
return bool(re.search(lowercase__ ,lowercase__ ) )
if __name__ == "__main__":
a_ = '0094702343221'
print(is_sri_lankan_phone_number(phone))
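# Illustrative matches for the pattern above (derived from its alternations):
#   is_sri_lankan_phone_number('0094702343221') -> True   # 0094 prefix
#   is_sri_lankan_phone_number('+94771234567')  -> True   # +94 prefix
#   is_sri_lankan_phone_number('0112345678')    -> False  # no leading 7 after prefix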
| 685 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =ProphetNetTokenizer
_UpperCAmelCase =False
def _lowerCAmelCase ( self: Optional[Any]) ->List[Any]:
'''simple docstring'''
super().setUp()
a_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts( self: Optional[int] , tokenizer: Optional[Any]) ->Optional[int]:
        '''simple docstring'''
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer( self: Optional[Any]) ->Optional[Any]:
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [9, 6, 7, 12, 10, 11])
    def test_chinese( self: str) ->Any:
        '''simple docstring'''
        tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz") , ["ah", "\u535A", "\u63A8", "zz"])
    def test_basic_tokenizer_lower( self: Union[str, Any]) ->str:
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["hello", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
    def test_basic_tokenizer_lower_strip_accents_false( self: List[Any]) ->str:
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hällo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true( self: Optional[int]) ->str:
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default( self: Dict) ->int:
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo") , ["hello"])
    def test_basic_tokenizer_no_lower( self: int) ->int:
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? ") , ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_false( self: Dict) ->Tuple:
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HäLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_no_lower_strip_accents_true( self: List[str]) ->str:
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? ") , ["HaLLo", "!", "how", "Are", "yoU", "?"])
    def test_basic_tokenizer_respects_never_split_tokens( self: Any) ->List[str]:
        '''simple docstring'''
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]") , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer( self: str) ->Optional[Any]:
        '''simple docstring'''
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("") , [])
self.assertListEqual(tokenizer.tokenize("unwanted running") , ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running") , ["[UNK]", "runn", "##ing"])
@require_torch
    def test_prepare_batch( self: Tuple) ->List[str]:
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
        batch = tokenizer(src_text , padding=True , return_tensors="pt")
        self.assertIsInstance(batch , BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens , result)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
    def test_is_whitespace( self: List[Any]) ->Tuple:
'''simple docstring'''
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
    def test_is_control( self: List[str]) ->str:
'''simple docstring'''
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
    def test_is_punctuation( self: List[str]) ->str:
'''simple docstring'''
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
@slow
    def test_sequence_builders( self: Optional[int]) ->Tuple:
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders" , add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a)
        assert encoded_sentence == text + [1_02]
        assert encoded_pair == text + [1_02] + text_a + [1_02]
| 685 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
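# Illustrative lines the patterns above are meant to match:
#   MODEL_MAPPING_NAMES = OrderedDict(      <- matched by _re_intro_mapping
#       ("albert", "AlbertModel"),          <- _re_identifier captures "albert"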
def sort_auto_mapping(fname ,overwrite = False ) -> bool:
    '''simple docstring'''
    with open(fname ,"r" ,encoding="utf-8" ) as f:
        content = f.read()
    lines = content.split("\n" )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "(" ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")" ):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks ,key=lambda block : _re_identifier.search(block ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname ,"w" ,encoding="utf-8" ) as f:
            f.write("\n".join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings(overwrite = False ) -> None:
    '''simple docstring'''
    fnames = [os.path.join(PATH_TO_AUTO_MODULE ,f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith(".py" )]
    diffs = [sort_auto_mapping(fname ,overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames ,diffs ) if d]
        raise ValueError(
            F"""The following files have auto mappings that need sorting: {', '.join(failures )}. Run `make style` to fix"""
            " this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
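# Usage sketch: run without flags to rewrite the auto-mapping files in place,
# or pass `--check_only` to raise instead when any mapping is unsorted:
#   python sort_auto_mappings.py
#   python sort_auto_mappings.py --check_only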
| 685 | 1 |
'''simple docstring'''
def solution(n: int = 2000000 ) -> int:
    '''simple docstring'''
    primality_list = [0 for i in range(n + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 ,int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i ,n + 1 ,i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F'{solution() = }')
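    # Small-bound sanity check: the primes below 10 are 2, 3, 5 and 7.
    assert solution(10) == 17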
| 685 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class SCREAMING_SNAKE_CASE__ ( PreTrainedModel , BackboneMixin ):
    main_input_name ='''pixel_values'''
    supports_gradient_checkpointing =False
    config_class =TimmBackboneConfig
    def __init__( self: Union[str, Any] , config: Union[str, Any] , **kwargs: Tuple) ->None:
        '''simple docstring'''
        requires_backends(self , "timm")
        super().__init__(config)
        self.config = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
if config.backbone not in timm.list_models():
raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
        if hasattr(config , "out_features") and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config , "use_pretrained_backbone" , None)
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
# We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config , "out_indices" , None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone , pretrained=pretrained , features_only=config.features_only , in_chans=config.num_channels , out_indices=out_indices , **kwargs , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
@classmethod
    def from_pretrained( cls: Tuple , pretrained_model_name_or_path: Optional[Any] , *model_args: Optional[Any] , **kwargs: str) ->List[Any]:
        '''simple docstring'''
        requires_backends(cls , ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop("config" , TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone" , True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels" , config.num_channels)
        features_only = kwargs.pop("features_only" , config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices" , config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path , num_channels=num_channels , features_only=features_only , use_pretrained_backbone=use_pretrained_backbone , out_indices=out_indices , )
        return super()._from_config(config , **kwargs)
    def _init_weights( self: Optional[Any] , module: Optional[int]) ->None:
        '''simple docstring'''
        pass
    def forward( self: Tuple , pixel_values: List[Any] , output_attentions: Any=None , output_hidden_states: Dict=None , return_dict: Optional[int]=None , **kwargs: int) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values , **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values , **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps , hidden_states=hidden_states , attentions=None)
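# Hedged usage sketch (illustrative names: "resnet18" is just an example timm
# model id, and the class above plays the role of transformers' TimmBackbone):
#   config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
#   backbone = SCREAMING_SNAKE_CASE__(config)
#   feature_maps = backbone(pixel_values).feature_maps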
| 685 | 1 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds ,labels ) -> float:
    '''simple docstring'''
    return float((preds == labels).mean() )
def acc_and_fa(preds ,labels ) -> dict:
    '''simple docstring'''
    acc = simple_accuracy(preds ,labels )
    fa = float(f1_score(y_true=labels ,y_pred=preds ) )
return {
"accuracy": acc,
"f1": fa,
}
def pearson_and_spearman(preds ,labels ) -> dict:
    '''simple docstring'''
    pearson_corr = float(pearsonr(preds ,labels )[0] )
    spearman_corr = float(spearmanr(preds ,labels )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
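# Hedged sanity check for the helpers above (toy arrays, not real GLUE data):
#   import numpy as np
#   simple_accuracy(np.array([0, 1, 1, 0]) , np.array([0, 1, 0, 0]))  # -> 0.75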
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    def _info( self: int) ->List[str]:
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
"references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
}) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute( self: str , predictions: Optional[Any] , references: Any) ->Any:
        '''simple docstring'''
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions , references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references)}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", "
"\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]")
| 685 |
'''simple docstring'''
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
def __init__( self: Optional[Any]) ->List[str]:
'''simple docstring'''
        self.queues = [
[],
[],
[],
]
    def enqueue( self: Dict , priority: int , data: int) ->None:
'''simple docstring'''
try:
if len(self.queues[priority]) >= 1_00:
raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2")
    def dequeue( self: Union[str, Any]) ->int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0)
raise UnderFlowError("All queues are empty")
def __str__( self: Dict) ->str:
'''simple docstring'''
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
def __init__( self: Any) ->List[str]:
'''simple docstring'''
        self.queue = []
    def enqueue( self: int , data: int) ->None:
        '''simple docstring'''
        if len(self.queue) == 1_00:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)
    def dequeue( self: List[str]) ->int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError("The queue is empty")
else:
            data = min(self.queue)
            self.queue.remove(data)
            return data
def __str__( self: Optional[int]) ->str:
'''simple docstring'''
return str(self.queue)
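# Note: ElementPriorityQueue.dequeue() scans the whole list for the minimum on
# every call, so it is O(n); a binary heap would bring this down to O(log n).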
def fixed_priority_queue() -> None:
'''simple docstring'''
    fpq = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
'''simple docstring'''
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 685 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config ,input_ids ,decoder_input_ids=None ,attention_mask=None ,decoder_attention_mask=None ,head_mask=None ,decoder_head_mask=None ,cross_attn_head_mask=None ,):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id ,1 ,0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id ,1 ,0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
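    # NOTE: the head masks built above are kept for parity with the PyTorch
    # helper but are deliberately not returned; the Flax models under test do
    # not consume them.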
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester:
    def __init__( self: Dict , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=32 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ) ->None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self: Union[str, Any]) ->Optional[int]:
        '''simple docstring'''
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64)) , -1)
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self: str) ->int:
        '''simple docstring'''
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self: int , model_class_name: str , config: str , inputs_dict: List[Any]) ->Tuple:
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""")
    def check_use_cache_forward_with_attn_mask( self: List[Any] , model_class_name: str , config: Any , inputs_dict: List[Any]) ->Dict:
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""")
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    vocab_size =99
    def _get_config_and_data( self: List[str]) ->Any:
        '''simple docstring'''
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
    def test_lm_forward( self: List[str]) ->List[Any]:
        '''simple docstring'''
        config , input_ids , batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape , expected_shape)
    def test_lm_uneven_forward( self: Optional[int]) ->Union[str, Any]:
        '''simple docstring'''
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64)
        outputs = lm_model(input_ids=context , decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape , expected_shape)
    def test_shift_tokens_right( self: int) ->Any:
        '''simple docstring'''
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64)
        shifted = shift_tokens_right(input_ids , 1 , 2)
        n_pad_before = np.equal(input_ids , 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted , 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape , input_ids.shape)
        self.assertEqual(n_pad_after , n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class SCREAMING_SNAKE_CASE__ ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    is_encoder_decoder =True
    all_model_classes =(
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes =(FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self: Union[str, Any]) ->None:
        '''simple docstring'''
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward( self: Optional[int]) ->Tuple:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict)
    def test_use_cache_forward_with_attn_mask( self: List[Any]) ->int:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict)
    def test_encode( self: Optional[Any]) ->Dict:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class)
                model = model_class(config)
                @jax.jit
                def encode_jitted(input_ids: Any , attention_mask: Optional[Any]=None , **kwargs: Union[str, Any]):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask)
                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs) , len(outputs))
                for jitted_output, output in zip(jitted_outputs , outputs):
                    self.assertEqual(jitted_output.shape , output.shape)
    def test_decode( self: Optional[Any]) ->Optional[int]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids: Optional[Any] , decoder_attention_mask: Any , encoder_outputs: Dict):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(jitted_outputs) , len(outputs))
                for jitted_output, output in zip(jitted_outputs , outputs):
                    self.assertEqual(jitted_output.shape , output.shape)
    @slow
    def test_model_from_pretrained( self: Optional[Any]) ->Tuple:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B( self: Union[str, Any]) ->str:
        '''simple docstring'''
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text , return_tensors="jax")
        generated_utterances = model.generate(**model_inputs , **FASTER_GEN_KWARGS).sequences
        tgt_text = "Sam is a great name. It means \"sun\" in Gaelic."
        generated_txt = tokenizer.batch_decode(generated_utterances , **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 685 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
'''simple docstring'''
    data_dict = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
    dataset = Dataset.from_dict(data_dict )
    return dataset
class SCREAMING_SNAKE_CASE__ ( TestCase ):
    def test_make_duplicate_clusters( self: Union[str, Any]) ->Optional[int]:
        '''simple docstring'''
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds , 0.85)
        self.assertEqual(len(duplicate_clusters[0]) , 2)
    def test_deduplicate_dataset( self: Any) ->Dict:
        '''simple docstring'''
        ds = get_dataset()
        ds_filter , duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter) , 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"] , 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , True)
| 685 | 1 |
'''simple docstring'''
def solution(pence: int = 200 ) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1 # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin ,pence + 1 ,1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73_682
| 685 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
a_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( DonutImageProcessor ):
    def __init__( self: List[Any] , *args: str , **kwargs: Tuple) ->None:
        '''simple docstring'''
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs)
| 685 | 1 |