| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 - 54.5k chars) | int64 (0 - 371) | string (87 - 49.2k chars) | int64 (0 - 349) | int64 (0 - 1) |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : int = '''mctct'''
def __init__( self: int , _SCREAMING_SNAKE_CASE: Optional[Any]=8065 , _SCREAMING_SNAKE_CASE: List[str]=1536 , _SCREAMING_SNAKE_CASE: Optional[int]=36 , _SCREAMING_SNAKE_CASE: Optional[int]=6144 , _SCREAMING_SNAKE_CASE: Tuple=4 , _SCREAMING_SNAKE_CASE: str=384 , _SCREAMING_SNAKE_CASE: Union[str, Any]=920 , _SCREAMING_SNAKE_CASE: Tuple=1e-5 , _SCREAMING_SNAKE_CASE: Dict=0.3 , _SCREAMING_SNAKE_CASE: str="relu" , _SCREAMING_SNAKE_CASE: int=0.02 , _SCREAMING_SNAKE_CASE: Any=0.3 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.3 , _SCREAMING_SNAKE_CASE: List[str]=1 , _SCREAMING_SNAKE_CASE: Optional[int]=0 , _SCREAMING_SNAKE_CASE: str=2 , _SCREAMING_SNAKE_CASE: List[str]=1 , _SCREAMING_SNAKE_CASE: Tuple=0.3 , _SCREAMING_SNAKE_CASE: str=1 , _SCREAMING_SNAKE_CASE: Optional[int]=(7,) , _SCREAMING_SNAKE_CASE: List[str]=(3,) , _SCREAMING_SNAKE_CASE: Tuple=80 , _SCREAMING_SNAKE_CASE: Any=1 , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: Any="sum" , _SCREAMING_SNAKE_CASE: Union[str, Any]=False , **_SCREAMING_SNAKE_CASE: List[str] , ) -> Tuple:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = attention_head_dim
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = layerdrop
UpperCamelCase_ = hidden_act
UpperCamelCase_ = initializer_range
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = pad_token_id
UpperCamelCase_ = bos_token_id
UpperCamelCase_ = eos_token_id
UpperCamelCase_ = conv_glu_dim
UpperCamelCase_ = conv_dropout
UpperCamelCase_ = num_conv_layers
UpperCamelCase_ = input_feat_per_channel
UpperCamelCase_ = input_channels
UpperCamelCase_ = conv_channels
UpperCamelCase_ = ctc_loss_reduction
UpperCamelCase_ = ctc_zero_infinity
# keep these as lists so the config can be exported to JSON (prevents the config test from failing)
UpperCamelCase_ = list(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = list(_SCREAMING_SNAKE_CASE )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
f'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
| 328 |
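The row above is an obfuscated copy of Hugging Face's M-CTC-T configuration class. For reference, a minimal de-obfuscated sketch of the same pattern (assuming the `transformers` `PretrainedConfig` API; field names are taken from the row above, defaults are illustrative):

```python
from transformers import PretrainedConfig


class MCTCTConfig(PretrainedConfig):
    """Minimal sketch of the config pattern used in the row above."""

    model_type = "mctct"

    def __init__(self, vocab_size=8065, hidden_size=1536, num_conv_layers=1,
                 conv_kernel=(7,), pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id,
                         eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_conv_layers = num_conv_layers
        # stored as a list so the config stays JSON-exportable
        self.conv_kernel = list(conv_kernel)
        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "`len(config.conv_kernel)` must equal `config.num_conv_layers`, "
                f"got {len(self.conv_kernel)} and {self.num_conv_layers}."
            )
```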
import requests
from bsa import BeautifulSoup
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> str:
UpperCamelCase_ = BeautifulSoup(requests.get(UpperCamelCase_ , params=UpperCamelCase_ ).content , "html.parser" )
UpperCamelCase_ = soup.find("div" , attrs={"class": "gs_ri"} )
UpperCamelCase_ = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_UpperCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 3_0,
'pages': '3979-3990',
'year': 2_0_1_8,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 328 | 1 |
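De-obfuscated (the dataset maps digits to letters, so `bsa` stands for `bs4`), the row above fetches the "Cited by" count from Google Scholar. A runnable sketch, assuming `requests` and `beautifulsoup4` are installed and Scholar's `gs_ri`/`gs_fl` markup is unchanged:

```python
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the 'Cited by N' anchor text for a Scholar lookup."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
```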
from __future__ import annotations
from collections import deque
class _UpperCamelCase :
def __init__( self: Dict , _SCREAMING_SNAKE_CASE: list[str] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []} )
for keyword in keywords:
self.add_keyword(_SCREAMING_SNAKE_CASE )
self.set_fail_transitions()
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str ) -> int | None:
"""simple docstring"""
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: str ) -> None:
"""simple docstring"""
UpperCamelCase_ = 0
for character in keyword:
UpperCamelCase_ = self.find_next_state(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
UpperCamelCase_ = len(self.adlist ) - 1
else:
UpperCamelCase_ = next_state
self.adlist[current_state]["output"].append(_SCREAMING_SNAKE_CASE )
def lowercase ( self: Any ) -> None:
"""simple docstring"""
UpperCamelCase_ = deque()
for node in self.adlist[0]["next_states"]:
q.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = 0
while q:
UpperCamelCase_ = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.adlist[r]["fail_state"]
while (
self.find_next_state(_SCREAMING_SNAKE_CASE , self.adlist[child]["value"] ) is None
and state != 0
):
UpperCamelCase_ = self.adlist[state]["fail_state"]
UpperCamelCase_ = self.find_next_state(
_SCREAMING_SNAKE_CASE , self.adlist[child]["value"] )
if self.adlist[child]["fail_state"] is None:
UpperCamelCase_ = 0
UpperCamelCase_ = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: str ) -> dict[str, list[int]]:
"""simple docstring"""
UpperCamelCase_ = {} # returns a dict with keywords and list of its occurrences
UpperCamelCase_ = 0
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
while (
self.find_next_state(_SCREAMING_SNAKE_CASE , string[i] ) is None
and current_state != 0
):
UpperCamelCase_ = self.adlist[current_state]["fail_state"]
UpperCamelCase_ = self.find_next_state(_SCREAMING_SNAKE_CASE , string[i] )
if next_state is None:
UpperCamelCase_ = 0
else:
UpperCamelCase_ = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
UpperCamelCase_ = []
result[key].append(i - len(_SCREAMING_SNAKE_CASE ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 328 |
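The class above is an obfuscated Aho-Corasick automaton: a keyword trie whose failure links are built breadth-first, giving multi-pattern search in a single pass over the text. A compact runnable sketch of the same idea:

```python
from collections import deque


class AhoCorasick:
    def __init__(self, keywords):
        # each node: value (edge char), next (child ids), fail (link), output (keywords)
        self.nodes = [{"value": "", "next": [], "fail": 0, "output": []}]
        for kw in keywords:
            self._add(kw)
        self._build_fail_links()

    def _goto(self, state, char):
        for nxt in self.nodes[state]["next"]:
            if self.nodes[nxt]["value"] == char:
                return nxt
        return None

    def _add(self, keyword):
        state = 0
        for char in keyword:
            nxt = self._goto(state, char)
            if nxt is None:
                self.nodes.append({"value": char, "next": [], "fail": 0, "output": []})
                self.nodes[state]["next"].append(len(self.nodes) - 1)
                nxt = len(self.nodes) - 1
            state = nxt
        self.nodes[state]["output"].append(keyword)

    def _build_fail_links(self):
        q = deque(self.nodes[0]["next"])  # depth-1 nodes keep fail = 0
        while q:
            r = q.popleft()
            for child in self.nodes[r]["next"]:
                q.append(child)
                state = self.nodes[r]["fail"]
                char = self.nodes[child]["value"]
                while self._goto(state, char) is None and state != 0:
                    state = self.nodes[state]["fail"]
                fail = self._goto(state, char)
                self.nodes[child]["fail"] = fail if fail is not None else 0
                self.nodes[child]["output"] += self.nodes[self.nodes[child]["fail"]]["output"]

    def search(self, text):
        result, state = {}, 0
        for i, char in enumerate(text):
            while self._goto(state, char) is None and state != 0:
                state = self.nodes[state]["fail"]
            state = self._goto(state, char) or 0
            for kw in self.nodes[state]["output"]:
                result.setdefault(kw, []).append(i - len(kw) + 1)
        return result


print(AhoCorasick(["what", "hat", "ver", "er"]).search("whatever"))
# {'what': [0], 'hat': [1], 'ver': [5], 'er': [6]}
```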
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self: List[str] , *,
_SCREAMING_SNAKE_CASE: int = 4 , _SCREAMING_SNAKE_CASE: int = 768 , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase_ = nn.Parameter(torch.zeros(_SCREAMING_SNAKE_CASE ) )
# parameters for additional clip time embeddings
UpperCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# parameters for encoder hidden states
UpperCamelCase_ = clip_extra_context_tokens
UpperCamelCase_ = nn.Linear(
_SCREAMING_SNAKE_CASE , self.clip_extra_context_tokens * cross_attention_dim )
UpperCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = nn.LayerNorm(_SCREAMING_SNAKE_CASE )
def lowercase ( self: Optional[int] , *, _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple ) -> str:
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
UpperCamelCase_ = image_embeddings.shape[0]
UpperCamelCase_ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
UpperCamelCase_ = classifier_free_guidance_embeddings.expand(
_SCREAMING_SNAKE_CASE , -1 )
UpperCamelCase_ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
UpperCamelCase_ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
UpperCamelCase_ = self.embedding_proj(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.clip_image_embeddings_project_to_time_embeddings(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
UpperCamelCase_ = self.clip_extra_context_tokens_proj(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = clip_extra_context_tokens.reshape(_SCREAMING_SNAKE_CASE , -1 , self.clip_extra_context_tokens )
UpperCamelCase_ = clip_extra_context_tokens.permute(0 , 2 , 1 )
UpperCamelCase_ = self.encoder_hidden_states_proj(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.text_encoder_hidden_states_norm(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 328 | 1 |
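The forward pass above is unCLIP's text-projection module: it prepends a learned unconditional embedding for classifier-free guidance and reshapes a linear projection into extra context tokens. A toy sketch of those two tensor manipulations (shapes are illustrative, not the model's real configuration):

```python
import torch

batch, dim, n_extra_tokens = 2, 768, 4
image_embeddings = torch.randn(batch, dim)

# learned "unconditional" embedding, broadcast over the batch and prepended
uncond = torch.zeros(dim).unsqueeze(0).expand(batch, -1)
guided = torch.cat([uncond, image_embeddings], dim=0)  # (2 * batch, dim)

# project into extra context tokens: (N, dim) -> (N, n_extra_tokens, dim)
proj = torch.nn.Linear(dim, n_extra_tokens * dim)
extra = proj(guided).reshape(guided.shape[0], -1, n_extra_tokens).permute(0, 2, 1)
print(extra.shape)  # torch.Size([4, 4, 768])
```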
_UpperCAmelCase = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
UpperCamelCase_ = 0
while number:
# Speed is increased slightly by processing five digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
number //= 100000
return sum_of_digits_squared
# Every chain falls into one of two cycles: one containing 89 and one
# containing only the element 1. Declaring 58 (a member of the 89 cycle)
# first minimizes the number of iterations needed to check the remaining
# members, so 58 and 1 are seeded at the start.
# The dictionary was changed to an array to speed up the solution.
_UpperCAmelCase = [None] * 1_0_0_0_0_0_0_0
_UpperCAmelCase = True
_UpperCAmelCase = False
def lowerCAmelCase_ ( UpperCamelCase_ ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCamelCase_ = chain(next_number(UpperCamelCase_ ) )
UpperCamelCase_ = number_chain
while number < 10000000:
UpperCamelCase_ = number_chain
number *= 10
return number_chain
def lowerCAmelCase_ ( UpperCamelCase_ = 10000000 ) -> int:
for i in range(1 , UpperCamelCase_ ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(UpperCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution() = }''')
| 328 |
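That row is an obfuscated Project Euler 92 solution (square-digit chains ending in 89). A self-contained sketch that memoizes with a dict instead of the pre-seeded array (slower, but easier to follow; the full ten-million limit takes a little while in CPython):

```python
def next_number(n: int) -> int:
    """Sum of the squares of the digits of n."""
    return sum(int(d) ** 2 for d in str(n))


def arrives_at_89(n: int, cache: dict) -> bool:
    seen = []
    while n not in (1, 89) and n not in cache:
        seen.append(n)
        n = next_number(n)
    answer = cache.get(n, n == 89)
    for m in seen:
        cache[m] = answer
    return answer


def solution(limit: int = 10_000_000) -> int:
    cache: dict = {}
    return sum(arrives_at_89(i, cache) for i in range(1, limit))
```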
from functools import lru_cache
def lowerCAmelCase_ ( UpperCamelCase_ ) -> set:
UpperCamelCase_ = 2
UpperCamelCase_ = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(UpperCamelCase_ )
if n > 1:
factors.add(UpperCamelCase_ )
return factors
@lru_cache
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
return len(unique_prime_factors(UpperCamelCase_ ) )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> bool:
return len(set(UpperCamelCase_ ) ) in (0, 1)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> list:
UpperCamelCase_ = 2
while True:
# Increment each value of a generated range
UpperCamelCase_ = [base + i for i in range(UpperCamelCase_ )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
UpperCamelCase_ = [upf_len(UpperCamelCase_ ) for x in group]
checker.append(UpperCamelCase_ )
# If all numbers in the list are equal, return the group variable.
if equality(UpperCamelCase_ ):
return group
# Increment our base variable by 1
base += 1
def lowerCAmelCase_ ( UpperCamelCase_ = 4 ) -> int:
UpperCamelCase_ = run(UpperCamelCase_ )
return results[0] if len(UpperCamelCase_ ) else None
if __name__ == "__main__":
print(solution())
| 328 | 1 |
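The row above is Project Euler 47: find the first of `n` consecutive integers that each have `n` distinct prime factors. A clean sketch of the same search:

```python
from functools import lru_cache


@lru_cache(maxsize=None)
def num_distinct_prime_factors(n: int) -> int:
    count, i = 0, 2
    while i * i <= n:
        if n % i == 0:
            count += 1
            while n % i == 0:
                n //= i
        i += 1
    return count + (1 if n > 1 else 0)


def solution(target: int = 4) -> int:
    base = 2
    while True:
        if all(num_distinct_prime_factors(base + k) == target for k in range(target)):
            return base
        base += 1
```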
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Optional[Any]:
UpperCamelCase_ = filter(lambda UpperCamelCase_ : p.requires_grad , model.parameters() )
UpperCamelCase_ = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_UpperCAmelCase = logging.getLogger(__name__)
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]:
if metric == "rouge2":
UpperCamelCase_ = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
UpperCamelCase_ = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
UpperCamelCase_ = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
" function." )
UpperCamelCase_ = ModelCheckpoint(
dirpath=UpperCamelCase_ , filename=UpperCamelCase_ , monitor=F'''val_{metric}''' , mode="max" , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
return EarlyStopping(
monitor=F'''val_{metric}''' , mode="min" if "loss" in metric else "max" , patience=UpperCamelCase_ , verbose=UpperCamelCase_ , )
class _UpperCamelCase ( pl.Callback ):
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = {f'''lr_group_{i}''': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_SCREAMING_SNAKE_CASE )
@rank_zero_only
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: pl.Trainer , _SCREAMING_SNAKE_CASE: pl.LightningModule , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: str=True ) -> None:
"""simple docstring"""
logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
UpperCamelCase_ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
UpperCamelCase_ = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCamelCase_ = od / "test_results.txt"
UpperCamelCase_ = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCamelCase_ = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
UpperCamelCase_ = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
generations_file.parent.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , "a+" ) as writer:
for key in sorted(_SCREAMING_SNAKE_CASE ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCamelCase_ = metrics[key]
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
UpperCamelCase_ = val.item()
UpperCamelCase_ = f'''{key}: {val:.6f}\n'''
writer.write(_SCREAMING_SNAKE_CASE )
if not save_generations:
return
if "preds" in metrics:
UpperCamelCase_ = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(_SCREAMING_SNAKE_CASE )
@rank_zero_only
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Any:
"""simple docstring"""
try:
UpperCamelCase_ = pl_module.model.model.num_parameters()
except AttributeError:
UpperCamelCase_ = pl_module.model.num_parameters()
UpperCamelCase_ = count_trainable_parameters(_SCREAMING_SNAKE_CASE )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: pl.Trainer , _SCREAMING_SNAKE_CASE: pl.LightningModule ) -> Tuple:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "test" )
@rank_zero_only
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: pl.Trainer , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 328 |
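The callbacks above wire seq2seq metrics into PyTorch Lightning. The checkpoint factory boils down to one `ModelCheckpoint` construction; a minimal sketch, assuming `pytorch_lightning` is installed (the dirpath is a placeholder):

```python
from pytorch_lightning.callbacks import ModelCheckpoint

checkpoint = ModelCheckpoint(
    dirpath="checkpoints",                        # hypothetical output directory
    filename="{val_avg_rouge2:.4f}-{step_count}",
    monitor="val_rouge2",
    mode="max",  # rouge/bleu/em: higher is better
    save_top_k=3,
)
```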
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
UpperCamelCase_ = len(UpperCamelCase_ )
UpperCamelCase_ = len(matrix[0] )
UpperCamelCase_ = min(UpperCamelCase_ , UpperCamelCase_ )
for row in range(UpperCamelCase_ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , UpperCamelCase_ ):
UpperCamelCase_ = matrix[col][row] / matrix[row][row]
for i in range(UpperCamelCase_ , UpperCamelCase_ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
UpperCamelCase_ = True
for i in range(row + 1 , UpperCamelCase_ ):
if matrix[i][row] != 0:
UpperCamelCase_ , UpperCamelCase_ = matrix[i], matrix[row]
UpperCamelCase_ = False
break
if reduce:
rank -= 1
for i in range(UpperCamelCase_ ):
UpperCamelCase_ = matrix[i][rank]
# NOTE: decrementing `row` does not rewind a Python for-loop; the original
# algorithm intended to re-process the same row after the swap
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 328 | 1 |
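The row above computes matrix rank by Gaussian elimination. When NumPy is available, the same check is a single call:

```python
import numpy as np

matrix = [[1.0, 2.0, 3.0],
          [2.0, 4.0, 6.0],  # linearly dependent on the first row
          [1.0, 0.0, 1.0]]
print(np.linalg.matrix_rank(np.array(matrix)))  # 2
```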
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 328 |
import math
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
if 0 not in (x, y):
# We use the relation log10(x^y) = y * log10(x), where 10 is the base.
return y * math.logaa(UpperCamelCase_ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
_UpperCAmelCase = 'Enter the base and the power separated by a comma: '
_UpperCAmelCase , _UpperCAmelCase = map(int, input(prompt).split(','))
_UpperCAmelCase , _UpperCAmelCase = map(int, input(prompt).split(','))
# We find the log of each number, using the function res(), which takes two
# arguments.
_UpperCAmelCase = res(xa, ya)
_UpperCAmelCase = res(xb, yb)
# We check for the largest number
if resa > resb:
print('Largest number is', xa, '^', ya)
elif resb > resa:
print('Largest number is', xb, '^', yb)
else:
print('Both are equal')
| 328 | 1 |
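The snippet above compares x^y against another power via logarithms, avoiding huge intermediate values: since log10(x^y) = y * log10(x), comparing the logs compares the powers. A tiny worked example:

```python
import math


def log_power(x: int, y: int) -> float:
    """log10 of x**y, computed without forming x**y."""
    return y * math.log10(x)


# 2**100 vs 10**30: 100 * log10(2) ~= 30.10 > 30.0, so 2**100 is larger
print(log_power(2, 100) > log_power(10, 30))  # True
```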
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : Union[str, Any] = '''mobilenet_v2'''
def __init__( self: Dict , _SCREAMING_SNAKE_CASE: Optional[int]=3 , _SCREAMING_SNAKE_CASE: Tuple=224 , _SCREAMING_SNAKE_CASE: List[str]=1.0 , _SCREAMING_SNAKE_CASE: Optional[int]=8 , _SCREAMING_SNAKE_CASE: Dict=8 , _SCREAMING_SNAKE_CASE: Optional[Any]=6 , _SCREAMING_SNAKE_CASE: Tuple=32 , _SCREAMING_SNAKE_CASE: Union[str, Any]=True , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: str="relu6" , _SCREAMING_SNAKE_CASE: Tuple=True , _SCREAMING_SNAKE_CASE: Optional[int]=0.8 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.02 , _SCREAMING_SNAKE_CASE: Tuple=0.0_01 , _SCREAMING_SNAKE_CASE: int=255 , **_SCREAMING_SNAKE_CASE: Tuple , ) -> Tuple:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
UpperCamelCase_ = num_channels
UpperCamelCase_ = image_size
UpperCamelCase_ = depth_multiplier
UpperCamelCase_ = depth_divisible_by
UpperCamelCase_ = min_depth
UpperCamelCase_ = expand_ratio
UpperCamelCase_ = output_stride
UpperCamelCase_ = first_layer_is_expansion
UpperCamelCase_ = finegrained_output
UpperCamelCase_ = hidden_act
UpperCamelCase_ = tf_padding
UpperCamelCase_ = classifier_dropout_prob
UpperCamelCase_ = initializer_range
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = semantic_loss_ignore_index
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : Any = version.parse('''1.11''' )
@property
def lowercase ( self: Dict ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def lowercase ( self: Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def lowercase ( self: Optional[int] ) -> float:
"""simple docstring"""
return 1e-4
| 328 |
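The config above mirrors the `transformers` MobileNetV2 configuration plus its ONNX export spec. A quick usage sketch, assuming a `transformers` version that ships MobileNetV2:

```python
from transformers import MobileNetV2Config

config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
print(config.model_type)        # mobilenet_v2
print(config.depth_multiplier)  # 1.4
```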
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
_UpperCAmelCase = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> List[Any]:
if isinstance(UpperCamelCase_ , torch.Tensor ):
return image
elif isinstance(UpperCamelCase_ , PIL.Image.Image ):
UpperCamelCase_ = [image]
UpperCamelCase_ = [trans(img.convert("RGB" ) ) for img in image]
UpperCamelCase_ = torch.stack(UpperCamelCase_ )
return image
class _UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self: List[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Dict ) -> str:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCamelCase_ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Dict ) -> Optional[Any]:
"""simple docstring"""
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' )
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] ) -> int:
"""simple docstring"""
UpperCamelCase_ = min(int(num_inference_steps * strength ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int]=None ) -> List[Any]:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_SCREAMING_SNAKE_CASE )}''' )
UpperCamelCase_ = image.to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(_SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCamelCase_ = init_latents.shape
UpperCamelCase_ = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
# get latents
print("add noise to latents at timestep" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.scheduler.add_noise(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = init_latents
return latents
@torch.no_grad()
def __call__( self: Dict , _SCREAMING_SNAKE_CASE: Union[torch.FloatTensor, PIL.Image.Image] = None , _SCREAMING_SNAKE_CASE: float = 0.8 , _SCREAMING_SNAKE_CASE: int = 1 , _SCREAMING_SNAKE_CASE: Optional[Union[torch.Generator, List[torch.Generator]]] = None , _SCREAMING_SNAKE_CASE: float = 0.0 , _SCREAMING_SNAKE_CASE: int = 50 , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[str] = "pil" , _SCREAMING_SNAKE_CASE: bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
self.check_inputs(_SCREAMING_SNAKE_CASE )
# 2. Preprocess image
UpperCamelCase_ = preprocess(_SCREAMING_SNAKE_CASE )
# 3. set timesteps
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=self.device )
UpperCamelCase_ , UpperCamelCase_ = self.get_timesteps(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.device )
UpperCamelCase_ = timesteps[:1].repeat(_SCREAMING_SNAKE_CASE )
# 4. Prepare latent variables
UpperCamelCase_ = self.prepare_latents(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.unet.dtype , self.device , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = latents
# 5. Denoising loop
for t in self.progress_bar(_SCREAMING_SNAKE_CASE ):
# 1. predict noise model_output
UpperCamelCase_ = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCamelCase_ = self.scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , use_clipped_model_output=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample
UpperCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase_ = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
| 328 | 1 |
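The pipeline above is an img2img-style DDIM loop: `strength` decides how much noise is added and, symmetrically, how much of the schedule is re-run. The `get_timesteps` arithmetic in isolation:

```python
def get_timestep_window(num_inference_steps, strength):
    """Return (t_start, steps_to_run) for a truncated img2img schedule."""
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return t_start, num_inference_steps - t_start


print(get_timestep_window(50, 0.8))  # (10, 40): skip 10 steps, run the last 40
```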
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_UpperCAmelCase = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_=None ) -> List[Any]:
require_version(deps[pkg] , UpperCamelCase_ )
| 328 |
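`require_version` (and its `_core` variant) checks an installed package against a pip-style requirement string and raises with the given hint if the check fails; for example:

```python
from transformers.utils.versions import require_version

# raises at import time if the installed numpy is older than 1.17
require_version("numpy>=1.17", "pip install -U numpy")
```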
import re
from filelock import FileLock
try:
import nltk
_UpperCAmelCase = True
except (ImportError, ModuleNotFoundError):
_UpperCAmelCase = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> str:
UpperCamelCase_ = re.sub("<n>" , "" , UpperCamelCase_ )  # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCamelCase_ ) )
| 328 | 1 |
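With the `re.sub` result now assigned, the helper above strips Pegasus's `<n>` markers and then puts each sentence on its own line so that sentence-level ROUGE matches published scores. The NLTK call on its own (assuming `nltk` plus its `punkt` data):

```python
import nltk

nltk.download("punkt", quiet=True)
text = "ROUGE-L is sentence based. Split before scoring."
print("\n".join(nltk.sent_tokenize(text)))
# ROUGE-L is sentence based.
# Split before scoring.
```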
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_UpperCAmelCase = data_utils.TransfoXLTokenizer
_UpperCAmelCase = data_utils.TransfoXLCorpus
_UpperCAmelCase = data_utils
_UpperCAmelCase = data_utils
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(UpperCamelCase_ , "rb" ) as fp:
UpperCamelCase_ = pickle.load(UpperCamelCase_ , encoding="latin1" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
UpperCamelCase_ = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
UpperCamelCase_ = corpus.vocab.__dict__
torch.save(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase_ = corpus.__dict__
corpus_dict_no_vocab.pop("vocab" , UpperCamelCase_ )
UpperCamelCase_ = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(UpperCamelCase_ , UpperCamelCase_ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
UpperCamelCase_ = os.path.abspath(UpperCamelCase_ )
UpperCamelCase_ = os.path.abspath(UpperCamelCase_ )
print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
UpperCamelCase_ = TransfoXLConfig()
else:
UpperCamelCase_ = TransfoXLConfig.from_json_file(UpperCamelCase_ )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_ = TransfoXLLMHeadModel(UpperCamelCase_ )
UpperCamelCase_ = load_tf_weights_in_transfo_xl(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save pytorch-model
UpperCamelCase_ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase_ = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
print(F'''Save PyTorch model to {os.path.abspath(UpperCamelCase_ )}''' )
torch.save(model.state_dict() , UpperCamelCase_ )
print(F'''Save configuration file to {os.path.abspath(UpperCamelCase_ )}''' )
with open(UpperCamelCase_ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
_UpperCAmelCase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 328 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DiTPipeline
_UpperCamelCase : Any = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Dict = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_UpperCamelCase : Optional[int] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Dict = False
def lowercase ( self: str ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase_ = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = AutoencoderKL()
UpperCamelCase_ = DDIMScheduler()
UpperCamelCase_ = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[str]=0 ) -> Dict:
"""simple docstring"""
if str(_SCREAMING_SNAKE_CASE ).startswith("mps" ):
UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowercase ( self: Any ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = "cpu"
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = pipe(**_SCREAMING_SNAKE_CASE ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
UpperCamelCase_ = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
UpperCamelCase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 )
def lowercase ( self: Optional[int] ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase ( self: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class _UpperCamelCase ( unittest.TestCase ):
def lowercase ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
UpperCamelCase_ = ["vase", "umbrella", "white shark", "white wolf"]
UpperCamelCase_ = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def lowercase ( self: int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
UpperCamelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
UpperCamelCase_ = ["vase", "umbrella"]
UpperCamelCase_ = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
| 328 | 1 |
import os
import pytest
from attr import dataclass
_UpperCAmelCase = 'us-east-1' # defaults region
@dataclass
class _UpperCamelCase :
_UpperCamelCase : str
_UpperCamelCase : Any = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
_UpperCamelCase : List[str] = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 1_6,
'''per_device_eval_batch_size''': 1_6,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_0_0,
'''save_steps''': 5_5_0_0,
}
_UpperCamelCase : Tuple = {**hyperparameters, '''max_steps''': 1_0_0_0}
@property
def lowercase ( self: Tuple ) -> str:
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
]
@property
def lowercase ( self: List[Any] ) -> str:
"""simple docstring"""
return f'''{self.framework}-transformers-test'''
@property
def lowercase ( self: int ) -> str:
"""simple docstring"""
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def lowercase ( self: Dict ) -> str:
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Dict:
UpperCamelCase_ = SageMakerTestEnvironment(framework=request.cls.framework )
| 328 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _UpperCamelCase :
def __init__( self: str ) -> Any:
"""simple docstring"""
UpperCamelCase_ = ""
UpperCamelCase_ = ""
UpperCamelCase_ = []
UpperCamelCase_ = 0
UpperCamelCase_ = 256
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = 0
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Dict ) -> str:
"""simple docstring"""
UpperCamelCase_ = cva.imread(_SCREAMING_SNAKE_CASE , 0 )
UpperCamelCase_ = copy.deepcopy(self.img )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
UpperCamelCase_ = np.sum(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCamelCase_ = x[i] / self.k
self.sk += prk
UpperCamelCase_ = (self.L - 1) * self.sk
if self.rem != 0:
UpperCamelCase_ = int(last % last )
UpperCamelCase_ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = int(np.ma.count(self.img ) / self.img[1].size )
UpperCamelCase_ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCamelCase_ = self.img[j][i]
if num != self.last_list[num]:
UpperCamelCase_ = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def lowercase ( self: Any ) -> Optional[Any]:
"""simple docstring"""
plt.hist(self.img.ravel() , 256 , [0, 256] )
def lowercase ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
_UpperCAmelCase = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
_UpperCAmelCase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 328 | 1 |
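The class above does histogram equalization by hand with OpenCV (`cva` is the dataset's spelling of `cv2`). OpenCV ships the same operation for 8-bit grayscale images as a one-liner; a sketch using the input path from the row above:

```python
import cv2

img = cv2.imread("image_data/input.jpg", cv2.IMREAD_GRAYSCALE)
equalized = cv2.equalizeHist(img)  # spreads intensities across the full 0-255 range
cv2.imwrite("output_data/output.jpg", equalized)
```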
from collections import namedtuple
import requests
from lxml import html # type: ignore
_UpperCAmelCase = namedtuple('covid_data', 'cases deaths recovered')
def lowerCAmelCase_ ( UpperCamelCase_ = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
UpperCamelCase_ = "//div[@class = \"maincounter-number\"]/span/text()"
return covid_data(*html.fromstring(requests.get(UpperCamelCase_ ).content ).xpath(UpperCamelCase_ ) )
_UpperCAmelCase = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 328 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_UpperCAmelCase = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_UpperCAmelCase = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_UpperCAmelCase = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
return float((preds == labels).mean() )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="binary" ) -> Tuple:
UpperCamelCase_ = simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase_ = float(fa_score(y_true=UpperCamelCase_ , y_pred=UpperCamelCase_ , average=UpperCamelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
UpperCamelCase_ = {}
for id_pred, label in zip(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase_ = F'''{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'''
UpperCamelCase_ = id_pred["prediction"]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
UpperCamelCase_ = [(pred, label)]
UpperCamelCase_ , UpperCamelCase_ = [], []
for question, preds_labels in question_map.items():
UpperCamelCase_ , UpperCamelCase_ = zip(*UpperCamelCase_ )
UpperCamelCase_ = fa_score(y_true=UpperCamelCase_ , y_pred=UpperCamelCase_ , average="macro" )
fas.append(UpperCamelCase_ )
UpperCamelCase_ = int(sum(pred == label for pred, label in preds_labels ) == len(UpperCamelCase_ ) )
ems.append(UpperCamelCase_ )
UpperCamelCase_ = float(sum(UpperCamelCase_ ) / len(UpperCamelCase_ ) )
UpperCamelCase_ = sum(UpperCamelCase_ ) / len(UpperCamelCase_ )
UpperCamelCase_ = float(fa_score(y_true=UpperCamelCase_ , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
def lowercase ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
def lowercase ( self: List[Any] ) -> int:
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] ) -> Dict:
"""simple docstring"""
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
elif self.config_name == "cb":
return acc_and_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , fa_avg="macro" )
elif self.config_name == "record":
UpperCamelCase_ = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
UpperCamelCase_ = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 328 | 1 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = ['model.decoder.embed_positions.weights']
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Any:
if "emb" in name:
UpperCamelCase_ = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
UpperCamelCase_ = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
UpperCamelCase_ = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
UpperCamelCase_ = name.replace("linear1" , "fc1" )
if "linear2" in name:
UpperCamelCase_ = name.replace("linear2" , "fc2" )
if "norm1" in name:
UpperCamelCase_ = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
UpperCamelCase_ = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
UpperCamelCase_ = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
UpperCamelCase_ = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
UpperCamelCase_ = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
UpperCamelCase_ = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple[Dict, Dict]:
UpperCamelCase_ = list(state_dict.keys() )
UpperCamelCase_ = {}
for key in keys:
UpperCamelCase_ = state_dict.pop(UpperCamelCase_ )
UpperCamelCase_ = rename_keys(UpperCamelCase_ )
if "in_proj_weight" in key:
# split fused qkv proj
UpperCamelCase_ = val[:hidden_size, :]
UpperCamelCase_ = val[hidden_size : 2 * hidden_size, :]
UpperCamelCase_ = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
UpperCamelCase_ = val
else:
UpperCamelCase_ = val
return state_dict, enc_dec_proj_state_dict
def lowerCAmelCase_ ( UpperCamelCase_ ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
UpperCamelCase_ = 1024
UpperCamelCase_ = 24
UpperCamelCase_ = 16
elif checkpoint == "medium":
UpperCamelCase_ = 1536
UpperCamelCase_ = 48
UpperCamelCase_ = 24
elif checkpoint == "large":
UpperCamelCase_ = 2048
UpperCamelCase_ = 48
UpperCamelCase_ = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
UpperCamelCase_ = MusicgenDecoderConfig(
hidden_size=UpperCamelCase_ , ffn_dim=hidden_size * 4 , num_hidden_layers=UpperCamelCase_ , num_attention_heads=UpperCamelCase_ , )
return config
@torch.no_grad()
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="cpu" ) -> str:
UpperCamelCase_ = MusicGen.get_pretrained(UpperCamelCase_ , device=UpperCamelCase_ )
UpperCamelCase_ = decoder_config_from_checkpoint(UpperCamelCase_ )
UpperCamelCase_ = fairseq_model.lm.state_dict()
UpperCamelCase_ , UpperCamelCase_ = rename_state_dict(
UpperCamelCase_ , hidden_size=decoder_config.hidden_size )
UpperCamelCase_ = TaEncoderModel.from_pretrained("t5-base" )
UpperCamelCase_ = EncodecModel.from_pretrained("facebook/encodec_32khz" )
UpperCamelCase_ = MusicgenForCausalLM(UpperCamelCase_ ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
UpperCamelCase_ , UpperCamelCase_ = decoder.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(UpperCamelCase_ ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
UpperCamelCase_ = MusicgenForConditionalGeneration(text_encoder=UpperCamelCase_ , audio_encoder=UpperCamelCase_ , decoder=UpperCamelCase_ )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(UpperCamelCase_ )
# check we can do a forward pass
UpperCamelCase_ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
UpperCamelCase_ = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
UpperCamelCase_ = model(input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ ).logits
if logits.shape != (8, 1, 2048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
UpperCamelCase_ = AutoTokenizer.from_pretrained("t5-base" )
UpperCamelCase_ = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
UpperCamelCase_ = MusicgenProcessor(feature_extractor=UpperCamelCase_ , tokenizer=UpperCamelCase_ )
# set the appropriate bos/pad token ids
UpperCamelCase_ = 2048
UpperCamelCase_ = 2048
# set other default generation config params
UpperCamelCase_ = int(30 * audio_encoder.config.frame_rate )
UpperCamelCase_ = True
UpperCamelCase_ = 3.0
if pytorch_dump_folder is not None:
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(UpperCamelCase_ )
processor.save_pretrained(UpperCamelCase_ )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(UpperCamelCase_ )
processor.push_to_hub(UpperCamelCase_ )
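# Example invocation (hypothetical script name and output path):
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu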
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
_UpperCAmelCase = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 328 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : str = '''mgp-str'''
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int]=[32, 128] , _SCREAMING_SNAKE_CASE: Tuple=4 , _SCREAMING_SNAKE_CASE: Optional[Any]=3 , _SCREAMING_SNAKE_CASE: Optional[int]=27 , _SCREAMING_SNAKE_CASE: Tuple=38 , _SCREAMING_SNAKE_CASE: Tuple=50257 , _SCREAMING_SNAKE_CASE: List[Any]=30522 , _SCREAMING_SNAKE_CASE: Optional[Any]=768 , _SCREAMING_SNAKE_CASE: Dict=12 , _SCREAMING_SNAKE_CASE: List[str]=12 , _SCREAMING_SNAKE_CASE: Dict=4.0 , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: Tuple=False , _SCREAMING_SNAKE_CASE: Tuple=1e-5 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.0 , _SCREAMING_SNAKE_CASE: Tuple=0.0 , _SCREAMING_SNAKE_CASE: List[Any]=0.0 , _SCREAMING_SNAKE_CASE: List[str]=False , _SCREAMING_SNAKE_CASE: int=0.02 , **_SCREAMING_SNAKE_CASE: Any , ) -> str:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = max_token_length
UpperCamelCase_ = num_character_labels
UpperCamelCase_ = num_bpe_labels
UpperCamelCase_ = num_wordpiece_labels
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = mlp_ratio
UpperCamelCase_ = distilled
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = drop_rate
UpperCamelCase_ = qkv_bias
UpperCamelCase_ = attn_drop_rate
UpperCamelCase_ = drop_path_rate
UpperCamelCase_ = output_aa_attentions
UpperCamelCase_ = initializer_range
| 328 | 1 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
_UpperCAmelCase = getLogger(__name__)
_UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 8 , UpperCamelCase_ = DEFAULT_DEVICE , UpperCamelCase_=False , UpperCamelCase_="summarization" , UpperCamelCase_=None , **UpperCamelCase_ , ) -> Dict:
UpperCamelCase_ = Path(UpperCamelCase_ ).open("w" , encoding="utf-8" )
UpperCamelCase_ = str(UpperCamelCase_ )
UpperCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ ).to(UpperCamelCase_ )
if fpaa:
UpperCamelCase_ = model.half()
UpperCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase_ )
logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
UpperCamelCase_ = time.time()
# update config with task specific params
use_task_specific_params(UpperCamelCase_ , UpperCamelCase_ )
if prefix is None:
UpperCamelCase_ = prefix or getattr(model.config , "prefix" , "" ) or ""
for examples_chunk in tqdm(list(chunks(UpperCamelCase_ , UpperCamelCase_ ) ) ):
UpperCamelCase_ = [prefix + text for text in examples_chunk]
UpperCamelCase_ = tokenizer(UpperCamelCase_ , return_tensors="pt" , truncation=UpperCamelCase_ , padding="longest" ).to(UpperCamelCase_ )
UpperCamelCase_ = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **UpperCamelCase_ , )
UpperCamelCase_ = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
for hypothesis in dec:
fout.write(hypothesis + "\n" )
fout.flush()
fout.close()
UpperCamelCase_ = int(time.time() - start_time ) # seconds
UpperCamelCase_ = len(UpperCamelCase_ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
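# Example call (hypothetical model name and paths; any extra keyword arguments
# are forwarded to `model.generate`):
#   stats = generate_summaries_or_translations(
#       ["Some article text ..."], "preds.txt", "sshleifer/distilbart-cnn-12-6",
#       batch_size=8, device="cpu", num_beams=4)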
def lowerCAmelCase_ ( ) -> List[Any]:
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )
def lowerCAmelCase_ ( UpperCamelCase_=True ) -> Optional[int]:
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("model_name" , type=UpperCamelCase_ , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("input_path" , type=UpperCamelCase_ , help="like cnn_dm/test.source" )
parser.add_argument("save_path" , type=UpperCamelCase_ , help="where to save summaries" )
parser.add_argument("--reference_path" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="like cnn_dm/test.target" )
parser.add_argument("--score_path" , type=UpperCamelCase_ , required=UpperCamelCase_ , default="metrics.json" , help="where to save metrics" )
parser.add_argument("--device" , type=UpperCamelCase_ , required=UpperCamelCase_ , default=UpperCamelCase_ , help="cuda, cuda:1, cpu etc." )
parser.add_argument(
"--prefix" , type=UpperCamelCase_ , required=UpperCamelCase_ , default=UpperCamelCase_ , help="will be added to the begininng of src examples" )
parser.add_argument("--task" , type=UpperCamelCase_ , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=UpperCamelCase_ , default=8 , required=UpperCamelCase_ , help="batch size" )
parser.add_argument(
"--n_obs" , type=UpperCamelCase_ , default=-1 , required=UpperCamelCase_ , help="How many observations. Defaults to all." )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--dump-args" , action="store_true" , help="print the custom hparams with the results" )
parser.add_argument(
"--info" , nargs="?" , type=UpperCamelCase_ , const=datetime_now() , help=(
"use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
" lang=en-ru. If no value is passed, the current datetime string will be used."
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
UpperCamelCase_ , UpperCamelCase_ = parser.parse_known_args()
UpperCamelCase_ = parse_numeric_n_bool_cl_kwargs(UpperCamelCase_ )
if parsed_args and verbose:
print(F'''parsed the following generate kwargs: {parsed_args}''' )
UpperCamelCase_ = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
UpperCamelCase_ = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=UpperCamelCase_ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("Can't mix --fp16 and --device cpu" )
UpperCamelCase_ = generate_summaries_or_translations(
UpperCamelCase_ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **UpperCamelCase_ , )
if args.reference_path is None:
return {}
# Compute scores
UpperCamelCase_ = calculate_bleu if "translation" in args.task else calculate_rouge
UpperCamelCase_ = [x.rstrip() for x in open(args.save_path ).readlines()]
UpperCamelCase_ = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(UpperCamelCase_ )]
UpperCamelCase_ = score_fn(UpperCamelCase_ , UpperCamelCase_ )
scores.update(UpperCamelCase_ )
if args.dump_args:
scores.update(UpperCamelCase_ )
if args.info:
UpperCamelCase_ = args.info
if verbose:
print(UpperCamelCase_ )
if args.score_path is not None:
json.dump(UpperCamelCase_ , open(args.score_path , "w" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 328 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
_UpperCamelCase : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_UpperCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_UpperCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_UpperCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    _UpperCamelCase : bool = field(default=lowerCAmelCase_ , metadata={'''help''': '''Whether to freeze the encoder.'''} )
_UpperCamelCase : bool = field(default=lowerCAmelCase_ , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class _UpperCamelCase :
_UpperCamelCase : str = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
_UpperCamelCase : Optional[str] = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
_UpperCamelCase : Optional[int] = field(
default=1_0_2_4 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCamelCase : Optional[int] = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCamelCase : Optional[int] = field(
default=1_4_2 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
_UpperCamelCase : Optional[int] = field(
default=1_4_2 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
_UpperCamelCase : Optional[str] = field(default=lowerCAmelCase_ , metadata={'''help''': '''Source language id for translation.'''} )
_UpperCamelCase : Optional[str] = field(default=lowerCAmelCase_ , metadata={'''help''': '''Target language id for translation.'''} )
_UpperCamelCase : Optional[int] = field(default=lowerCAmelCase_ , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
_UpperCamelCase : bool = field(
default=lowerCAmelCase_ , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
logger.info(F'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(F''' {key} = {metrics[key]}''' )
save_json(UpperCamelCase_ , os.path.join(UpperCamelCase_ , F'''{split}_results.json''' ) )
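# Example (illustrative values): handle_metrics("val", {"val_loss": 0.42}, "outputs")
# logs each metric and writes outputs/val_results.json.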
def lowerCAmelCase_ ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses()
check_output_dir(UpperCamelCase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , UpperCamelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase_ = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
assert hasattr(UpperCamelCase_ , UpperCamelCase_ ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(UpperCamelCase_ , UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
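    # e.g. passing `--dropout 0.1` on the command line sets training_args.dropout,
    # which the loop above copies onto config.dropout before the model is built
    # (assuming the training arguments expose these fields, as in the legacy
    # seq2seq examples).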
UpperCamelCase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(UpperCamelCase_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
UpperCamelCase_ = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(UpperCamelCase_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase_ = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
UpperCamelCase_ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(UpperCamelCase_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
UpperCamelCase_ = SeqaSeqDataset
# Get datasets
UpperCamelCase_ = (
dataset_class(
UpperCamelCase_ , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
UpperCamelCase_ = (
dataset_class(
UpperCamelCase_ , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
UpperCamelCase_ = (
dataset_class(
UpperCamelCase_ , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
UpperCamelCase_ = (
build_compute_metrics_fn(data_args.task , UpperCamelCase_ ) if training_args.predict_with_generate else None
)
UpperCamelCase_ = SeqaSeqTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , data_args=UpperCamelCase_ , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , data_collator=SeqaSeqDataCollator(
UpperCamelCase_ , UpperCamelCase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
UpperCamelCase_ = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
UpperCamelCase_ = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
UpperCamelCase_ = train_result.metrics
UpperCamelCase_ = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , UpperCamelCase_ , training_args.output_dir )
all_metrics.update(UpperCamelCase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCamelCase_ = trainer.evaluate(metric_key_prefix="val" )
UpperCamelCase_ = data_args.n_val
UpperCamelCase_ = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , UpperCamelCase_ , training_args.output_dir )
all_metrics.update(UpperCamelCase_ )
if training_args.do_predict:
logger.info("*** Predict ***" )
UpperCamelCase_ = trainer.predict(test_dataset=UpperCamelCase_ , metric_key_prefix="test" )
UpperCamelCase_ = test_output.metrics
UpperCamelCase_ = data_args.n_test
if trainer.is_world_process_zero():
UpperCamelCase_ = round(metrics["test_loss"] , 4 )
handle_metrics("test" , UpperCamelCase_ , training_args.output_dir )
all_metrics.update(UpperCamelCase_ )
if training_args.predict_with_generate:
UpperCamelCase_ = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
UpperCamelCase_ = lmap(str.strip , UpperCamelCase_ )
write_txt_file(UpperCamelCase_ , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(UpperCamelCase_ , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Optional[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 328 | 1 |
from abc import ABC, abstractmethod
from typing import List, Optional
class _UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self: int ) -> Optional[Any]:
"""simple docstring"""
self.test()
def lowercase ( self: List[Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = 0
UpperCamelCase_ = False
while not completed:
if counter == 1:
self.reset()
UpperCamelCase_ = self.advance()
if not self.does_advance(_SCREAMING_SNAKE_CASE ):
raise Exception(
"Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.update(_SCREAMING_SNAKE_CASE )
counter += 1
if counter > 10000:
raise Exception("update() does not fulfill the constraint." )
if self.remaining() != 0:
raise Exception("Custom Constraint is not defined correctly." )
@abstractmethod
def lowercase ( self: Tuple ) -> Optional[Any]:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: int ) -> int:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: int ) -> List[str]:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowercase ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowercase ( self: Union[str, Any] ) -> str:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
@abstractmethod
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: Tuple=False ) -> Union[str, Any]:
"""simple docstring"""
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class _UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self: Optional[int] , _SCREAMING_SNAKE_CASE: List[int] ) -> Union[str, Any]:
"""simple docstring"""
super(_SCREAMING_SNAKE_CASE , self ).__init__()
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
if any((not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or token_id < 0) for token_id in token_ids ):
raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )
UpperCamelCase_ = token_ids
UpperCamelCase_ = len(self.token_ids )
UpperCamelCase_ = -1 # the index of the currently fulfilled step
UpperCamelCase_ = False
def lowercase ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: int ) -> int:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_SCREAMING_SNAKE_CASE )}''' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: int ) -> List[Any]:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(_SCREAMING_SNAKE_CASE )}''' )
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
if self.does_advance(_SCREAMING_SNAKE_CASE ):
self.fulfilled_idx += 1
UpperCamelCase_ = True
if self.fulfilled_idx == (self.seqlen - 1):
UpperCamelCase_ = True
UpperCamelCase_ = completed
else:
# failed to make progress.
UpperCamelCase_ = True
self.reset()
return stepped, completed, reset
def lowercase ( self: Union[str, Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ = False
UpperCamelCase_ = 0
def lowercase ( self: str ) -> List[str]:
"""simple docstring"""
return self.seqlen - (self.fulfilled_idx + 1)
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: int=False ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = PhrasalConstraint(self.token_ids )
if stateful:
UpperCamelCase_ = self.seqlen
UpperCamelCase_ = self.fulfilled_idx
UpperCamelCase_ = self.completed
return new_constraint
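# A walk-through of the update() protocol above, assuming the class is exposed
# as PhrasalConstraint (illustrative token ids):
#   c = PhrasalConstraint([5, 9, 2])
#   c.update(5)  # -> (stepped=True,  completed=False, reset=False)
#   c.update(9)  # -> (stepped=True,  completed=False, reset=False)
#   c.update(2)  # -> (stepped=True,  completed=True,  reset=False)
# A non-matching token id resets progress instead: update(7) on a fresh
# constraint returns (False, False, True).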
class _UpperCamelCase :
def __init__( self: int , _SCREAMING_SNAKE_CASE: List[List[int]] , _SCREAMING_SNAKE_CASE: Tuple=True ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = max([len(_SCREAMING_SNAKE_CASE ) for one in nested_token_ids] )
UpperCamelCase_ = {}
for token_ids in nested_token_ids:
UpperCamelCase_ = root
for tidx, token_id in enumerate(_SCREAMING_SNAKE_CASE ):
if token_id not in level:
UpperCamelCase_ = {}
UpperCamelCase_ = level[token_id]
if no_subsets and self.has_subsets(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(
"Each list in `nested_token_ids` can't be a complete subset of another list, but is"
f''' {nested_token_ids}.''' )
UpperCamelCase_ = root
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: str ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.trie
for current_token in current_seq:
UpperCamelCase_ = start[current_token]
UpperCamelCase_ = list(start.keys() )
return next_tokens
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.next_tokens(_SCREAMING_SNAKE_CASE )
return len(_SCREAMING_SNAKE_CASE ) == 0
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: str ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = list(root.values() )
if len(_SCREAMING_SNAKE_CASE ) == 0:
return 1
else:
return sum([self.count_leaves(_SCREAMING_SNAKE_CASE ) for nn in next_nodes] )
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.count_leaves(_SCREAMING_SNAKE_CASE )
return len(_SCREAMING_SNAKE_CASE ) != leaf_count
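# The trie above stores the nested token-id lists as nested dicts. For example
# (illustrative ids), [[1, 2, 3], [1, 2, 4]] builds {1: {2: {3: {}, 4: {}}}},
# so next_tokens([1, 2]) returns [3, 4] and reached_leaf([1, 2, 3]) is True.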
class _UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[List[int]] ) -> Dict:
"""simple docstring"""
super(_SCREAMING_SNAKE_CASE , self ).__init__()
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or len(_SCREAMING_SNAKE_CASE ) == 0:
raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
if any(not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for token_ids in nested_token_ids ):
raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
if any(
any((not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )
UpperCamelCase_ = DisjunctiveTrie(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = nested_token_ids
UpperCamelCase_ = self.trie.max_height
UpperCamelCase_ = []
UpperCamelCase_ = False
def lowercase ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.trie.next_tokens(self.current_seq )
if len(_SCREAMING_SNAKE_CASE ) == 0:
return None
else:
return token_list
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: int ) -> str:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_SCREAMING_SNAKE_CASE )}''' )
UpperCamelCase_ = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: int ) -> Optional[int]:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_SCREAMING_SNAKE_CASE )}''' )
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
if self.does_advance(_SCREAMING_SNAKE_CASE ):
self.current_seq.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = True
else:
UpperCamelCase_ = True
self.reset()
UpperCamelCase_ = self.trie.reached_leaf(self.current_seq )
UpperCamelCase_ = completed
return stepped, completed, reset
def lowercase ( self: int ) -> int:
"""simple docstring"""
UpperCamelCase_ = False
UpperCamelCase_ = []
def lowercase ( self: Any ) -> Union[str, Any]:
"""simple docstring"""
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Union[str, Any]=False ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = DisjunctiveConstraint(self.token_ids )
if stateful:
UpperCamelCase_ = self.seqlen
UpperCamelCase_ = self.current_seq
UpperCamelCase_ = self.completed
return new_constraint
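# Same protocol as PhrasalConstraint, but the constraint completes as soon as
# any one branch of the trie reaches a leaf, e.g. (illustrative ids):
#   c = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
#   c.update(1)  # -> (True, False, False)
#   c.update(4)  # -> (True, True, False): the [1, 4] branch is fulfilled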
class _UpperCamelCase :
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[Constraint] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = constraints
# max # of steps required to fulfill a given constraint
UpperCamelCase_ = max([c.seqlen for c in constraints] )
UpperCamelCase_ = len(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = False
self.init_state()
def lowercase ( self: Any ) -> str:
"""simple docstring"""
UpperCamelCase_ = []
UpperCamelCase_ = None
UpperCamelCase_ = [constraint.copy(stateful=_SCREAMING_SNAKE_CASE ) for constraint in self.constraints]
def lowercase ( self: Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def lowercase ( self: List[str] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
UpperCamelCase_ = constraint.advance()
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
token_list.append(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
token_list.extend(_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase_ = self.inprogress_constraint.advance()
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
token_list.append(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
token_list.extend(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) == 0:
return None
else:
return token_list
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: Optional[List[int]] ) -> Union[str, Any]:
"""simple docstring"""
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
UpperCamelCase_ , UpperCamelCase_ = self.add(_SCREAMING_SNAKE_CASE )
# the entire list of constraints are fulfilled
if self.completed:
break
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: int ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )
UpperCamelCase_ , UpperCamelCase_ = False, False
if self.completed:
UpperCamelCase_ = True
UpperCamelCase_ = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = self.inprogress_constraint.update(_SCREAMING_SNAKE_CASE )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                # But that doesn't mean we call self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
UpperCamelCase_ = None
if len(self.pending_constraints ) == 0:
# we're done!
UpperCamelCase_ = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_SCREAMING_SNAKE_CASE ):
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = pending_constraint.update(_SCREAMING_SNAKE_CASE )
if not stepped:
raise Exception(
"`constraint.update(token_id)` is not yielding incremental progress, "
"even though `constraint.does_advance(token_id)` is true." )
if complete:
self.complete_constraints.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = None
if not complete and stepped:
UpperCamelCase_ = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
UpperCamelCase_ = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
UpperCamelCase_ = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Tuple=True ) -> Optional[int]:
"""simple docstring"""
        UpperCamelCase_ = ConstraintListState(self.constraints )  # we never actually touch the self.constraints
        # objects throughout this process, so they remain in their initialization state.
if stateful:
UpperCamelCase_ = [
constraint.copy(stateful=_SCREAMING_SNAKE_CASE ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
UpperCamelCase_ = self.inprogress_constraint.copy(stateful=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 328 |
def lowerCAmelCase_ ( UpperCamelCase_ ) -> list:
UpperCamelCase_ = int(UpperCamelCase_ )
if n_element < 1:
        UpperCamelCase_ = ValueError("n_element should be a positive number" )
raise my_error
UpperCamelCase_ = [1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = (0, 0, 0)
UpperCamelCase_ = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
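# Example (values follow from the 2^i * 3^j * 5^k definition; note that 7 is
# not a Hamming number and is skipped):
#   hamming(9) -> [1, 2, 3, 4, 5, 6, 8, 9, 10]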
if __name__ == "__main__":
_UpperCAmelCase = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
_UpperCAmelCase = hamming(int(n))
print('-----------------------------------------------------')
print(f'''The list with nth numbers is: {hamming_numbers}''')
print('-----------------------------------------------------')
| 328 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
def __init__( self: Optional[int] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[int]=7 , _SCREAMING_SNAKE_CASE: str=3 , _SCREAMING_SNAKE_CASE: Optional[int]=18 , _SCREAMING_SNAKE_CASE: Optional[int]=30 , _SCREAMING_SNAKE_CASE: Dict=400 , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: str=None , _SCREAMING_SNAKE_CASE: Optional[int]=True , _SCREAMING_SNAKE_CASE: Optional[int]=None , _SCREAMING_SNAKE_CASE: Dict=True , _SCREAMING_SNAKE_CASE: Tuple=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE: Optional[Any]=[0.5, 0.5, 0.5] , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = size if size is not None else {"shortest_edge": 18}
UpperCamelCase_ = crop_size if crop_size is not None else {"height": 18, "width": 18}
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = image_size
UpperCamelCase_ = min_resolution
UpperCamelCase_ = max_resolution
UpperCamelCase_ = do_resize
UpperCamelCase_ = size
UpperCamelCase_ = do_center_crop
UpperCamelCase_ = crop_size
UpperCamelCase_ = do_normalize
UpperCamelCase_ = image_mean
UpperCamelCase_ = image_std
def lowercase ( self: Dict ) -> Optional[int]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : str = LevitImageProcessor if is_vision_available() else None
def lowercase ( self: Any ) -> str:
"""simple docstring"""
UpperCamelCase_ = LevitImageProcessingTester(self )
@property
def lowercase ( self: List[str] ) -> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase ( self: Dict ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "image_mean" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "image_std" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_normalize" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_resize" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_center_crop" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "size" ) )
def lowercase ( self: Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def lowercase ( self: Tuple ) -> Tuple:
"""simple docstring"""
pass
def lowercase ( self: Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase ( self: Any ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 328 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = IFImgaImgSuperResolutionPipeline
_UpperCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
_UpperCamelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
_UpperCamelCase : List[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowercase ( self: List[str] ) -> Any:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Optional[int]=0 ) -> List[Any]:
"""simple docstring"""
if str(_SCREAMING_SNAKE_CASE ).startswith("mps" ):
UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase ( self: Any ) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowercase ( self: int ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def lowercase ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase ( self: Dict ) -> Any:
"""simple docstring"""
self._test_save_load_local()
def lowercase ( self: Any ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 328 | 1 |
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
_UpperCAmelCase = {
'n_samples': 6_4,
'horizon': 3_2,
'num_inference_steps': 2_0,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
_UpperCAmelCase = 'hopper-medium-v2'
_UpperCAmelCase = gym.make(env_name)
_UpperCAmelCase = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
_UpperCAmelCase = env.reset()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 1_0_0_0
_UpperCAmelCase = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
_UpperCAmelCase = pipeline(obs, planning_horizon=3_2)
# execute action in environment
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = env.step(denorm_actions)
_UpperCAmelCase = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
f''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
_UpperCAmelCase = next_observation
except KeyboardInterrupt:
pass
print(f'''Total reward: {total_reward}''')
| 328 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_UpperCAmelCase = {'UserAgent': UserAgent().random}
def lowerCAmelCase_ ( UpperCamelCase_ ) -> dict:
UpperCamelCase_ = script.contents[0]
UpperCamelCase_ = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
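# The script tag parsed above is expected to hold Instagram's shared-data JSON,
# shaped roughly like the following (illustrative; the payload changes over time):
#   {"config": ..., "entry_data": {"ProfilePage": [{"graphql": {"user": {...}}}]}}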
class _UpperCamelCase :
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: str ) -> str:
"""simple docstring"""
UpperCamelCase_ = f'''https://www.instagram.com/{username}/'''
UpperCamelCase_ = self.get_json()
def lowercase ( self: Union[str, Any] ) -> dict:
"""simple docstring"""
UpperCamelCase_ = requests.get(self.url , headers=_SCREAMING_SNAKE_CASE ).text
UpperCamelCase_ = BeautifulSoup(_SCREAMING_SNAKE_CASE , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self: Tuple ) -> str:
"""simple docstring"""
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self: List[Any] ) -> str:
"""simple docstring"""
return f'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def lowercase ( self: List[str] ) -> str:
"""simple docstring"""
return self.user_data["username"]
@property
def lowercase ( self: int ) -> str:
"""simple docstring"""
return self.user_data["full_name"]
@property
def lowercase ( self: List[Any] ) -> str:
"""simple docstring"""
return self.user_data["biography"]
@property
def lowercase ( self: List[Any] ) -> str:
"""simple docstring"""
return self.user_data["business_email"]
@property
def lowercase ( self: List[Any] ) -> str:
"""simple docstring"""
return self.user_data["external_url"]
@property
def lowercase ( self: List[Any] ) -> int:
"""simple docstring"""
return self.user_data["edge_followed_by"]["count"]
@property
def lowercase ( self: List[str] ) -> int:
"""simple docstring"""
return self.user_data["edge_follow"]["count"]
@property
def lowercase ( self: List[str] ) -> int:
"""simple docstring"""
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def lowercase ( self: List[str] ) -> str:
"""simple docstring"""
return self.user_data["profile_pic_url_hd"]
@property
def lowercase ( self: Optional[int] ) -> bool:
"""simple docstring"""
return self.user_data["is_verified"]
@property
def lowercase ( self: List[str] ) -> bool:
"""simple docstring"""
return self.user_data["is_private"]
def lowerCAmelCase_ ( UpperCamelCase_ = "github" ) -> None:
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
UpperCamelCase_ = InstagramUser(UpperCamelCase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , UpperCamelCase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = InstagramUser('github')
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 328 | 1 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def lowerCAmelCase_ ( UpperCamelCase_ = 8 ) -> str:
UpperCamelCase_ = ascii_letters + digits + punctuation
return "".join(secrets.choice(UpperCamelCase_ ) for _ in range(UpperCamelCase_ ) )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> str:
    # Full password generator: the required characters (chars_incl) are combined
    # with random letters, digits, and punctuation to fill the target length i.
# Put your code here...
i -= len(UpperCamelCase_ )
UpperCamelCase_ = i // 3
UpperCamelCase_ = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
UpperCamelCase_ = (
chars_incl
+ random(UpperCamelCase_ , quotient + remainder )
+ random(UpperCamelCase_ , UpperCamelCase_ )
+ random(UpperCamelCase_ , UpperCamelCase_ )
)
UpperCamelCase_ = list(UpperCamelCase_ )
shuffle(UpperCamelCase_ )
return "".join(UpperCamelCase_ )
# random is a generalised function for letters, characters and numbers
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> str:
return "".join(secrets.choice(UpperCamelCase_ ) for _ in range(UpperCamelCase_ ) )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
pass # Put your code here...
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> int:
pass # Put your code here...
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Any:
pass # Put your code here...
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ = 8 ) -> bool:
if len(UpperCamelCase_ ) < min_length:
# Your Password must be at least 8 characters long
return False
UpperCamelCase_ = any(char in ascii_uppercase for char in password )
UpperCamelCase_ = any(char in ascii_lowercase for char in password )
UpperCamelCase_ = any(char in digits for char in password )
UpperCamelCase_ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
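# Example, assuming the checker above is exposed as is_strong_password:
#   is_strong_password("Hwea7$2!")  # True: upper, lower, digit, punctuation, length 8
#   is_strong_password("pass")      # False: too short, no upper/digit/punctuation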
def lowerCAmelCase_ ( ) -> Optional[int]:
UpperCamelCase_ = int(input("Please indicate the max length of your password: " ).strip() )
UpperCamelCase_ = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(UpperCamelCase_ ) )
print(
"Alternative Password generated:" , alternative_password_generator(UpperCamelCase_ , UpperCamelCase_ ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
| 328 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_UpperCAmelCase = False
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = 'ybelkada/fonts'
def lowerCAmelCase_ ( ) -> Dict:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
"Pix2StructImageProcessor. Please upgrade torch." )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
requires_backends(UpperCamelCase_ , ["torch"] )
_check_torch_version()
UpperCamelCase_ = image_tensor.unsqueeze(0 )
UpperCamelCase_ = torch.nn.functional.unfold(UpperCamelCase_ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
UpperCamelCase_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCamelCase_ , UpperCamelCase_ , -1 )
UpperCamelCase_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
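# A doctest-style sketch of the unfold-based patchification above:
# `torch.nn.functional.unfold` with kernel size == stride == the patch size
# returns (batch, channels * patch_height * patch_width, num_patches).
# Toy shapes, illustration only:
#   >>> import torch
#   >>> img = torch.arange(16.0).reshape(1, 1, 4, 4)
#   >>> torch.nn.functional.unfold(img, (2, 2), stride=(2, 2)).shape
#   torch.Size([1, 4, 4])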
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ = 36 , UpperCamelCase_ = "black" , UpperCamelCase_ = "white" , UpperCamelCase_ = 5 , UpperCamelCase_ = 5 , UpperCamelCase_ = 5 , UpperCamelCase_ = 5 , UpperCamelCase_ = None , UpperCamelCase_ = None , ) -> Image.Image:
requires_backends(UpperCamelCase_ , "vision" )
# Add new lines so that each line is no more than 80 characters.
UpperCamelCase_ = textwrap.TextWrapper(width=80 )
UpperCamelCase_ = wrapper.wrap(text=UpperCamelCase_ )
UpperCamelCase_ = "\n".join(UpperCamelCase_ )
if font_bytes is not None and font_path is None:
UpperCamelCase_ = io.BytesIO(UpperCamelCase_ )
elif font_path is not None:
UpperCamelCase_ = font_path
else:
UpperCamelCase_ = hf_hub_download(UpperCamelCase_ , "Arial.TTF" )
UpperCamelCase_ = ImageFont.truetype(UpperCamelCase_ , encoding="UTF-8" , size=UpperCamelCase_ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
UpperCamelCase_ = ImageDraw.Draw(Image.new("RGB" , (1, 1) , UpperCamelCase_ ) )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = temp_draw.textbbox((0, 0) , UpperCamelCase_ , UpperCamelCase_ )
# Create the actual image with a bit of padding around the text.
UpperCamelCase_ = text_width + left_padding + right_padding
UpperCamelCase_ = text_height + top_padding + bottom_padding
UpperCamelCase_ = Image.new("RGB" , (image_width, image_height) , UpperCamelCase_ )
UpperCamelCase_ = ImageDraw.Draw(UpperCamelCase_ )
draw.text(xy=(left_padding, top_padding) , text=UpperCamelCase_ , fill=UpperCamelCase_ , font=UpperCamelCase_ )
return image
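# Usage sketch (an illustrative assumption, not part of the original file):
# render_text("What is shown in the image?") returns a white RGB PIL image with
# the question wrapped at 80 characters, drawn in black at text size 36, and
# padded by 5 pixels on every side.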
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) -> Union[str, Any]:
requires_backends(UpperCamelCase_ , "vision" )
# Convert to PIL image if necessary
UpperCamelCase_ = to_pil_image(UpperCamelCase_ )
UpperCamelCase_ = render_text(UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase_ = max(header_image.width , image.width )
UpperCamelCase_ = int(image.height * (new_width / image.width) )
UpperCamelCase_ = int(header_image.height * (new_width / header_image.width) )
UpperCamelCase_ = Image.new("RGB" , (new_width, new_height + new_header_height) , "white" )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
UpperCamelCase_ = to_numpy_array(UpperCamelCase_ )
if infer_channel_dimension_format(UpperCamelCase_ ) == ChannelDimension.LAST:
UpperCamelCase_ = to_channel_dimension_format(UpperCamelCase_ , ChannelDimension.LAST )
return new_image
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : str = ['''flattened_patches''']
def __init__( self: List[Any] , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: Dict[str, int] = None , _SCREAMING_SNAKE_CASE: int = 2048 , _SCREAMING_SNAKE_CASE: bool = False , **_SCREAMING_SNAKE_CASE: Optional[Any] , ) -> None:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = patch_size if patch_size is not None else {"height": 16, "width": 16}
UpperCamelCase_ = do_normalize
UpperCamelCase_ = do_convert_rgb
UpperCamelCase_ = max_patches
UpperCamelCase_ = is_vqa
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: dict , **_SCREAMING_SNAKE_CASE: Union[str, Any] ) -> np.ndarray:
"""simple docstring"""
requires_backends(self.extract_flattened_patches , "torch" )
_check_torch_version()
# convert to torch
UpperCamelCase_ = to_channel_dimension_format(_SCREAMING_SNAKE_CASE , ChannelDimension.FIRST )
UpperCamelCase_ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ , UpperCamelCase_ = patch_size["height"], patch_size["width"]
UpperCamelCase_ , UpperCamelCase_ = get_image_size(_SCREAMING_SNAKE_CASE )
# maximize scale s.t. the resized image yields at most `max_patches` patches (rows * columns <= max_patches)
UpperCamelCase_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
UpperCamelCase_ = max(min(math.floor(scale * image_height / patch_height ) , _SCREAMING_SNAKE_CASE ) , 1 )
UpperCamelCase_ = max(min(math.floor(scale * image_width / patch_width ) , _SCREAMING_SNAKE_CASE ) , 1 )
UpperCamelCase_ = max(num_feasible_rows * patch_height , 1 )
UpperCamelCase_ = max(num_feasible_cols * patch_width , 1 )
UpperCamelCase_ = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="bilinear" , align_corners=_SCREAMING_SNAKE_CASE , antialias=_SCREAMING_SNAKE_CASE , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
UpperCamelCase_ = torch_extract_patches(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = patches.shape
UpperCamelCase_ = patches_shape[1]
UpperCamelCase_ = patches_shape[2]
UpperCamelCase_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
UpperCamelCase_ = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
UpperCamelCase_ = torch.arange(_SCREAMING_SNAKE_CASE ).reshape([rows, 1] ).repeat(1 , _SCREAMING_SNAKE_CASE ).reshape([rows * columns, 1] )
UpperCamelCase_ = torch.arange(_SCREAMING_SNAKE_CASE ).reshape([1, columns] ).repeat(_SCREAMING_SNAKE_CASE , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
UpperCamelCase_ = row_ids.to(torch.floataa )
UpperCamelCase_ = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
UpperCamelCase_ = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
UpperCamelCase_ = torch.nn.functional.pad(_SCREAMING_SNAKE_CASE , [0, 0, 0, max_patches - (rows * columns)] ).float()
UpperCamelCase_ = to_numpy_array(_SCREAMING_SNAKE_CASE )
return result
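# Layout note (a hedged reading of the code above): each of the `max_patches`
# rows in the result holds [row_id, col_id, pixel values...], where the ids are
# 1-based floats and all-zero rows are padding -- exactly the property the
# attention-mask construction in `preprocess` relies on.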
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE: List[str] ) -> np.ndarray:
"""simple docstring"""
if image.dtype == np.uinta:
UpperCamelCase_ = image.astype(np.floataa )
# take mean across the whole `image`
UpperCamelCase_ = np.mean(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = np.std(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = max(_SCREAMING_SNAKE_CASE , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
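# This is per-image standardization: zero mean and unit variance computed over
# the whole image, with the std clamped to at least 1/sqrt(N) (N = number of
# values) so near-constant images do not explode. Quick hand check
# (illustrative): a constant uint8 image keeps std == 1/sqrt(N) and maps to all zeros.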
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: ImageInput , _SCREAMING_SNAKE_CASE: Optional[str] = None , _SCREAMING_SNAKE_CASE: bool = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[int] = None , _SCREAMING_SNAKE_CASE: Optional[Dict[str, int]] = None , _SCREAMING_SNAKE_CASE: Optional[Union[str, TensorType]] = None , _SCREAMING_SNAKE_CASE: ChannelDimension = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE: List[Any] , ) -> ImageInput:
"""simple docstring"""
UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase_ = patch_size if patch_size is not None else self.patch_size
UpperCamelCase_ = max_patches if max_patches is not None else self.max_patches
UpperCamelCase_ = self.is_vqa
if kwargs.get("data_format" , _SCREAMING_SNAKE_CASE ) is not None:
raise ValueError("data_format is not an accepted input as the outputs are " )
UpperCamelCase_ = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase_ = [convert_to_rgb(_SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase_ = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("A header text must be provided for VQA models." )
UpperCamelCase_ = kwargs.pop("font_bytes" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = kwargs.pop("font_path" , _SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = [header_text] * len(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [
render_header(_SCREAMING_SNAKE_CASE , header_text[i] , font_bytes=_SCREAMING_SNAKE_CASE , font_path=_SCREAMING_SNAKE_CASE )
for i, image in enumerate(_SCREAMING_SNAKE_CASE )
]
if do_normalize:
UpperCamelCase_ = [self.normalize(image=_SCREAMING_SNAKE_CASE ) for image in images]
# convert to torch tensor and permute
UpperCamelCase_ = [
self.extract_flattened_patches(image=_SCREAMING_SNAKE_CASE , max_patches=_SCREAMING_SNAKE_CASE , patch_size=_SCREAMING_SNAKE_CASE )
for image in images
]
# create attention mask in numpy
UpperCamelCase_ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
UpperCamelCase_ = BatchFeature(
data={"flattened_patches": images, "attention_mask": attention_masks} , tensor_type=_SCREAMING_SNAKE_CASE )
return encoded_outputs
| 328 | 1 |
from __future__ import annotations
_UpperCAmelCase = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> tuple[list[list[int]], list[list[int]]]:
UpperCamelCase_ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase_ ) )
] # the reference grid
UpperCamelCase_ = 1
UpperCamelCase_ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase_ ) )
] # the action grid
UpperCamelCase_ = init[0]
UpperCamelCase_ = init[1]
UpperCamelCase_ = 0
UpperCamelCase_ = g + heuristic[x][y] # estimated total cost: cost so far plus the heuristic estimate from this cell to the goal
UpperCamelCase_ = [[f, g, x, y]]
UpperCamelCase_ = False # flag that is set when search is complete
UpperCamelCase_ = False # flag set if there are no cells left to expand
while not found and not resign:
if len(UpperCamelCase_ ) == 0:
raise ValueError("Algorithm is unable to find solution" )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
UpperCamelCase_ = cell.pop()
UpperCamelCase_ = next_cell[2]
UpperCamelCase_ = next_cell[3]
UpperCamelCase_ = next_cell[1]
if x == goal[0] and y == goal[1]:
UpperCamelCase_ = True
else:
for i in range(len(UpperCamelCase_ ) ): # to try out different valid actions
UpperCamelCase_ = x + DIRECTIONS[i][0]
UpperCamelCase_ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCamelCase_ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
UpperCamelCase_ = g + cost
UpperCamelCase_ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
UpperCamelCase_ = 1
UpperCamelCase_ = i
UpperCamelCase_ = []
UpperCamelCase_ = goal[0]
UpperCamelCase_ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
UpperCamelCase_ = x - DIRECTIONS[action[x][y]][0]
UpperCamelCase_ = y - DIRECTIONS[action[x][y]][1]
UpperCamelCase_ = xa
UpperCamelCase_ = ya
invpath.append([x, y] )
UpperCamelCase_ = []
for i in range(len(UpperCamelCase_ ) ):
path.append(invpath[len(UpperCamelCase_ ) - 1 - i] )
return path, action
if __name__ == "__main__":
_UpperCAmelCase = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0's are free cells whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
_UpperCAmelCase = [0, 0]
# all coordinates are given as [row, col] (i.e. [y, x])
_UpperCAmelCase = [len(grid) - 1, len(grid[0]) - 1]
_UpperCAmelCase = 1
# the cost map which pushes the path closer to the goal
_UpperCAmelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
_UpperCAmelCase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
_UpperCAmelCase = 9_9
_UpperCAmelCase , _UpperCAmelCase = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
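# Expected behavior (illustrative): for the grid above, the printed path starts
# at [0, 0], runs down the free first column to [4, 0], detours around the
# obstacles at [3, 4] and [4, 4], and ends at the goal [4, 5].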
| 328 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self: Any , _SCREAMING_SNAKE_CASE: int = 768 , ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase_ = nn.Parameter(torch.zeros(1 , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = nn.Parameter(torch.ones(1 , _SCREAMING_SNAKE_CASE ) )
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Union[str, torch.device]] = None , _SCREAMING_SNAKE_CASE: Optional[torch.dtype] = None , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = nn.Parameter(self.mean.to(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = nn.Parameter(self.std.to(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) )
return self
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Dict ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = (embeds * self.std) + self.mean
return embeds
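# Round-trip sketch (illustrative): the two methods above are inverses of each
# other -- scaling embeddings to zero mean / unit std and then un-scaling them
# reproduces the original tensor up to floating-point error.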
| 328 | 1 |
def lowerCAmelCase_ ( UpperCamelCase_ ) -> list:
UpperCamelCase_ = int(UpperCamelCase_ )
if n_element < 1:
UpperCamelCase_ = ValueError("a should be a positive number" )
raise my_error
UpperCamelCase_ = [1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = (0, 0, 0)
UpperCamelCase_ = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
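# Illustrative check: the first ten Hamming numbers produced by this routine
# are [1, 2, 3, 4, 5, 6, 8, 9, 10, 12].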
if __name__ == "__main__":
_UpperCAmelCase = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
_UpperCAmelCase = hamming(int(n))
print('-----------------------------------------------------')
print(f'''The list with nth numbers is: {hamming_numbers}''')
print('-----------------------------------------------------')
| 328 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_UpperCAmelCase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_UpperCAmelCase = logging.getLogger()
def lowerCAmelCase_ ( ) -> Optional[int]:
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("-f" )
UpperCamelCase_ = parser.parse_args()
return args.f
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_="eval" ) -> Any:
UpperCamelCase_ = os.path.join(UpperCamelCase_ , F'''{split}_results.json''' )
if os.path.exists(UpperCamelCase_ ):
with open(UpperCamelCase_ , "r" ) as f:
return json.load(UpperCamelCase_ )
raise ValueError(F'''can\'t find {path}''' )
_UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCamelCase ( lowerCAmelCase_ ):
def lowercase ( self: Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_flax_glue.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def lowercase ( self: int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_clm_flax.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
def lowercase ( self: Any ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_summarization_flax.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def lowercase ( self: str ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_mlm_flax.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def lowercase ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_ta_mlm_flax.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def lowercase ( self: str ) -> int:
"""simple docstring"""
UpperCamelCase_ = 7 if get_gpu_count() > 1 else 2
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_flax_ner.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def lowercase ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_qa.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
| 328 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_UpperCAmelCase = {
'facebook/mask2former-swin-small-coco-instance': (
'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
_UpperCAmelCase = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : Union[str, Any] = '''mask2former'''
_UpperCamelCase : str = ['''swin''']
_UpperCamelCase : List[Any] = {'''hidden_size''': '''hidden_dim'''}
def __init__( self: str , _SCREAMING_SNAKE_CASE: Optional[Dict] = None , _SCREAMING_SNAKE_CASE: int = 256 , _SCREAMING_SNAKE_CASE: int = 256 , _SCREAMING_SNAKE_CASE: int = 256 , _SCREAMING_SNAKE_CASE: int = 1024 , _SCREAMING_SNAKE_CASE: str = "relu" , _SCREAMING_SNAKE_CASE: int = 6 , _SCREAMING_SNAKE_CASE: int = 10 , _SCREAMING_SNAKE_CASE: int = 8 , _SCREAMING_SNAKE_CASE: float = 0.0 , _SCREAMING_SNAKE_CASE: int = 2048 , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: int = 4 , _SCREAMING_SNAKE_CASE: int = 255 , _SCREAMING_SNAKE_CASE: int = 100 , _SCREAMING_SNAKE_CASE: float = 0.1 , _SCREAMING_SNAKE_CASE: float = 2.0 , _SCREAMING_SNAKE_CASE: float = 5.0 , _SCREAMING_SNAKE_CASE: float = 5.0 , _SCREAMING_SNAKE_CASE: int = 12544 , _SCREAMING_SNAKE_CASE: float = 3.0 , _SCREAMING_SNAKE_CASE: float = 0.75 , _SCREAMING_SNAKE_CASE: float = 0.02 , _SCREAMING_SNAKE_CASE: float = 1.0 , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: List[int] = [4, 8, 16, 32] , _SCREAMING_SNAKE_CASE: bool = None , **_SCREAMING_SNAKE_CASE: List[Any] , ) -> Dict:
"""simple docstring"""
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
UpperCamelCase_ = CONFIG_MAPPING["swin"](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_SCREAMING_SNAKE_CASE , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = backbone_config.pop("model_type" )
UpperCamelCase_ = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase_ = config_class.from_dict(_SCREAMING_SNAKE_CASE )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
f'''Supported model types: {",".join(self.backbones_supported )}''' )
UpperCamelCase_ = backbone_config
UpperCamelCase_ = feature_size
UpperCamelCase_ = mask_feature_size
UpperCamelCase_ = hidden_dim
UpperCamelCase_ = encoder_feedforward_dim
UpperCamelCase_ = activation_function
UpperCamelCase_ = encoder_layers
UpperCamelCase_ = decoder_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = dropout
UpperCamelCase_ = dim_feedforward
UpperCamelCase_ = pre_norm
UpperCamelCase_ = enforce_input_projection
UpperCamelCase_ = common_stride
UpperCamelCase_ = ignore_value
UpperCamelCase_ = num_queries
UpperCamelCase_ = no_object_weight
UpperCamelCase_ = class_weight
UpperCamelCase_ = mask_weight
UpperCamelCase_ = dice_weight
UpperCamelCase_ = train_num_points
UpperCamelCase_ = oversample_ratio
UpperCamelCase_ = importance_sample_ratio
UpperCamelCase_ = init_std
UpperCamelCase_ = init_xavier_std
UpperCamelCase_ = use_auxiliary_loss
UpperCamelCase_ = feature_strides
UpperCamelCase_ = output_auxiliary_logits
UpperCamelCase_ = decoder_layers
super().__init__(**_SCREAMING_SNAKE_CASE )
@classmethod
def lowercase ( cls: Union[str, Any] , _SCREAMING_SNAKE_CASE: PretrainedConfig , **_SCREAMING_SNAKE_CASE: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return cls(
backbone_config=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def lowercase ( self: List[Any] ) -> Dict[str, any]:
"""simple docstring"""
UpperCamelCase_ = copy.deepcopy(self.__dict__ )
UpperCamelCase_ = self.backbone_config.to_dict()
UpperCamelCase_ = self.__class__.model_type
return output
| 328 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
for param in module.parameters():
UpperCamelCase_ = False
def lowerCAmelCase_ ( ) -> Dict:
UpperCamelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCamelCase_ = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Union[str, Any]:
UpperCamelCase_ = plt.imshow(UpperCamelCase_ )
fig.axes.get_xaxis().set_visible(UpperCamelCase_ )
fig.axes.get_yaxis().set_visible(UpperCamelCase_ )
plt.show()
def lowerCAmelCase_ ( ) -> List[str]:
UpperCamelCase_ = datetime.now()
UpperCamelCase_ = current_time.strftime("%H:%M:%S" )
return timestamp
| 328 | 1 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
_UpperCAmelCase = pd.read_csv(
'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
'position_salaries.csv'
)
_UpperCAmelCase = dataset.iloc[:, 1:2].values
_UpperCAmelCase = dataset.iloc[:, 2].values
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = train_test_split(X, y, test_size=0.2, random_state=0)
_UpperCAmelCase = PolynomialFeatures(degree=4)
_UpperCAmelCase = poly_reg.fit_transform(X)
_UpperCAmelCase = LinearRegression()
pol_reg.fit(X_poly, y)
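# With degree=4 and a single input column, PolynomialFeatures expands each
# position level x into [1, x, x**2, x**3, x**4], so the linear regression
# fitted above actually models a quartic salary curve.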
def lowerCAmelCase_ ( ) -> Dict:
plt.scatter(UpperCamelCase_ , UpperCamelCase_ , color="red" )
plt.plot(UpperCamelCase_ , pol_reg.predict(poly_reg.fit_transform(UpperCamelCase_ ) ) , color="blue" )
plt.title("Truth or Bluff (Linear Regression)" )
plt.xlabel("Position level" )
plt.ylabel("Salary" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 328 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = '▁'
_UpperCAmelCase = {'vocab_file': 'spiece.model'}
_UpperCAmelCase = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
_UpperCAmelCase = {
'google/pegasus-xsum': 5_1_2,
}
_UpperCAmelCase = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[int] = ['''input_ids''', '''attention_mask''']
def __init__( self: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: str="<pad>" , _SCREAMING_SNAKE_CASE: Optional[Any]="</s>" , _SCREAMING_SNAKE_CASE: Any="<unk>" , _SCREAMING_SNAKE_CASE: int="<mask_2>" , _SCREAMING_SNAKE_CASE: List[Any]="<mask_1>" , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: Optional[int]=103 , _SCREAMING_SNAKE_CASE: Optional[Dict[str, Any]] = None , **_SCREAMING_SNAKE_CASE: Dict , ) -> None:
"""simple docstring"""
UpperCamelCase_ = offset
if additional_special_tokens is not None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError(
f'''additional_special_tokens should be of type {type(_SCREAMING_SNAKE_CASE )}, but is'''
f''' {type(_SCREAMING_SNAKE_CASE )}''' )
UpperCamelCase_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(_SCREAMING_SNAKE_CASE ) , self.offset - 1 )
]
if len(set(_SCREAMING_SNAKE_CASE ) ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
UpperCamelCase_ = additional_special_tokens_extended
else:
UpperCamelCase_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
UpperCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token_sent=_SCREAMING_SNAKE_CASE , offset=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = mask_token_sent
UpperCamelCase_ = vocab_file
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
# add special tokens to encoder dict
UpperCamelCase_ = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
UpperCamelCase_ = {v: k for k, v in self.encoder.items()}
@property
def lowercase ( self: Dict ) -> int:
"""simple docstring"""
return len(self.sp_model ) + self.offset
def lowercase ( self: int ) -> Dict[str, int]:
"""simple docstring"""
UpperCamelCase_ = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.__dict__.copy()
UpperCamelCase_ = None
return state
def __setstate__( self: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase_ = {}
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: str ) -> int:
"""simple docstring"""
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
UpperCamelCase_ = self.sp_model.piece_to_id(_SCREAMING_SNAKE_CASE )
return sp_id + self.offset
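# Id layout sketch (a reading of the code, assuming the default offset of 103):
# ids 0..offset-1 are reserved for the pad/eos/mask/<unk_i> tokens kept in
# `self.encoder`, and sentencepiece piece id p maps to token id p + offset.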
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: int ) -> str:
"""simple docstring"""
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
UpperCamelCase_ = self.sp_model.IdToPiece(index - self.offset )
return token
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = []
UpperCamelCase_ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
UpperCamelCase_ = []
else:
current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
return out_string.strip()
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
return 1
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: str ) -> str:
"""simple docstring"""
UpperCamelCase_ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: List , _SCREAMING_SNAKE_CASE: Optional[List] = None , _SCREAMING_SNAKE_CASE: bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(_SCREAMING_SNAKE_CASE )
elif token_ids_a is None:
return self._special_token_mask(_SCREAMING_SNAKE_CASE ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any]=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase_ = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , "wb" ) as fi:
UpperCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 328 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
def lowercase ( self: List[str] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = tempfile.mkdtemp()
UpperCamelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCamelCase_ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCamelCase_ = os.path.join(self.tmpdirname , _SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( self: str , **_SCREAMING_SNAKE_CASE: List[Any] ) -> Any:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: Union[str, Any] , **_SCREAMING_SNAKE_CASE: Optional[int] ) -> int:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: Optional[Any] , **_SCREAMING_SNAKE_CASE: Any ) -> Union[str, Any]:
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: Tuple ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase ( self: Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase_ = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase ( self: Any ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = self.get_rust_tokenizer()
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase_ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , _SCREAMING_SNAKE_CASE )
def lowercase ( self: Dict ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCamelCase_ = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
UpperCamelCase_ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def lowercase ( self: Tuple ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="np" )
UpperCamelCase_ = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = "lower newer"
UpperCamelCase_ = processor(text=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = tokenizer(_SCREAMING_SNAKE_CASE , padding="max_length" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = "lower newer"
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def lowercase ( self: Any ) -> str:
"""simple docstring"""
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_ = processor.batch_decode(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowercase ( self: Tuple ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = "lower newer"
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 328 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 328 | 1 |
from math import factorial
def lowerCAmelCase_ ( UpperCamelCase_ = 100 ) -> int:
return sum(int(UpperCamelCase_ ) for x in str(factorial(UpperCamelCase_ ) ) )
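# Worked example (easy to verify by hand): solution(10) == 27, since
# 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.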
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 328 |
import argparse
import json
from tqdm import tqdm
def lowerCAmelCase_ ( ) -> Tuple:
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=UpperCamelCase_ , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=UpperCamelCase_ , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=UpperCamelCase_ , help="where to store parsed gold_data_path file" , )
UpperCamelCase_ = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
UpperCamelCase_ = json.load(UpperCamelCase_ )
for dpr_record in tqdm(UpperCamelCase_ ):
UpperCamelCase_ = dpr_record["question"]
UpperCamelCase_ = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(UpperCamelCase_ ) + "\n" )
if __name__ == "__main__":
main()
| 328 | 1 |
def lowerCAmelCase_ ( UpperCamelCase_ = 100 ) -> int:
UpperCamelCase_ = set()
UpperCamelCase_ = 0
UpperCamelCase_ = n + 1 # maximum limit
for a in range(2 , UpperCamelCase_ ):
for b in range(2 , UpperCamelCase_ ):
UpperCamelCase_ = a**b # calculates the current power
collect_powers.add(UpperCamelCase_ ) # adds the result to the set
return len(UpperCamelCase_ )
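# Worked example: solution(5) == 15 -- the sixteen products a**b for
# 2 <= a, b <= 5 contain exactly one duplicate, 2**4 == 4**2 == 16.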
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
| 328 |
import requests
from bsa import BeautifulSoup
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> str:
UpperCamelCase_ = BeautifulSoup(requests.get(UpperCamelCase_ , params=UpperCamelCase_ ).content , "html.parser" )
UpperCamelCase_ = soup.find("div" , attrs={"class": "gs_ri"} )
UpperCamelCase_ = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_UpperCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 3_0,
'pages': '3979-3990',
'year': 2_0_1_8,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 328 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : Optional[Any] = '''blenderbot-small'''
_UpperCamelCase : Union[str, Any] = ['''past_key_values''']
_UpperCamelCase : Dict = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self: int , _SCREAMING_SNAKE_CASE: Any=50265 , _SCREAMING_SNAKE_CASE: int=512 , _SCREAMING_SNAKE_CASE: Optional[int]=8 , _SCREAMING_SNAKE_CASE: Tuple=2048 , _SCREAMING_SNAKE_CASE: Any=16 , _SCREAMING_SNAKE_CASE: Any=8 , _SCREAMING_SNAKE_CASE: List[Any]=2048 , _SCREAMING_SNAKE_CASE: str=16 , _SCREAMING_SNAKE_CASE: Any=0.0 , _SCREAMING_SNAKE_CASE: Any=0.0 , _SCREAMING_SNAKE_CASE: List[str]=True , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: int="gelu" , _SCREAMING_SNAKE_CASE: Tuple=512 , _SCREAMING_SNAKE_CASE: int=0.1 , _SCREAMING_SNAKE_CASE: Dict=0.0 , _SCREAMING_SNAKE_CASE: Dict=0.0 , _SCREAMING_SNAKE_CASE: Dict=0.02 , _SCREAMING_SNAKE_CASE: str=1 , _SCREAMING_SNAKE_CASE: Any=False , _SCREAMING_SNAKE_CASE: Optional[int]=0 , _SCREAMING_SNAKE_CASE: int=1 , _SCREAMING_SNAKE_CASE: Any=2 , _SCREAMING_SNAKE_CASE: Optional[Any]=2 , **_SCREAMING_SNAKE_CASE: Union[str, Any] , ) -> int:
"""simple docstring"""
UpperCamelCase_ = vocab_size
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = d_model
UpperCamelCase_ = encoder_ffn_dim
UpperCamelCase_ = encoder_layers
UpperCamelCase_ = encoder_attention_heads
UpperCamelCase_ = decoder_ffn_dim
UpperCamelCase_ = decoder_layers
UpperCamelCase_ = decoder_attention_heads
UpperCamelCase_ = dropout
UpperCamelCase_ = attention_dropout
UpperCamelCase_ = activation_dropout
UpperCamelCase_ = activation_function
UpperCamelCase_ = init_std
UpperCamelCase_ = encoder_layerdrop
UpperCamelCase_ = decoder_layerdrop
UpperCamelCase_ = use_cache
UpperCamelCase_ = encoder_layers
UpperCamelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , forced_eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
class _UpperCamelCase ( lowerCAmelCase_ ):
@property
def lowercase ( self: Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCamelCase_ = {0: "batch"}
UpperCamelCase_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
UpperCamelCase_ = {0: "batch", 1: "decoder_sequence"}
UpperCamelCase_ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(_SCREAMING_SNAKE_CASE , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCamelCase_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCamelCase_ , UpperCamelCase_ = self.num_layers
for i in range(_SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = {0: "batch", 2: "past_sequence + sequence"}
UpperCamelCase_ = {0: "batch", 2: "past_sequence + sequence"}
else:
UpperCamelCase_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def lowercase ( self: str ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase_ = super().outputs
else:
UpperCamelCase_ = super(_SCREAMING_SNAKE_CASE , self ).outputs
if self.use_past:
UpperCamelCase_ , UpperCamelCase_ = self.num_layers
for i in range(_SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = {0: "batch", 2: "past_sequence + sequence"}
UpperCamelCase_ = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: PreTrainedTokenizer , _SCREAMING_SNAKE_CASE: int = -1 , _SCREAMING_SNAKE_CASE: int = -1 , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Generate decoder inputs
UpperCamelCase_ = seq_length if not self.use_past else 1
UpperCamelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
UpperCamelCase_ = dict(**_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCamelCase_ , UpperCamelCase_ = common_inputs["input_ids"].shape
UpperCamelCase_ = common_inputs["decoder_input_ids"].shape[1]
UpperCamelCase_ , UpperCamelCase_ = self.num_attention_heads
UpperCamelCase_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase_ = decoder_seq_length + 3
UpperCamelCase_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCamelCase_ = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] , dim=1 )
UpperCamelCase_ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCamelCase_ , UpperCamelCase_ = self.num_layers
UpperCamelCase_ = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) - min_num_layers
UpperCamelCase_ = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(_SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append(
(
torch.zeros(_SCREAMING_SNAKE_CASE ),
torch.zeros(_SCREAMING_SNAKE_CASE ),
torch.zeros(_SCREAMING_SNAKE_CASE ),
torch.zeros(_SCREAMING_SNAKE_CASE ),
) )
# TODO: test this.
UpperCamelCase_ = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append((torch.zeros(_SCREAMING_SNAKE_CASE ), torch.zeros(_SCREAMING_SNAKE_CASE )) )
return common_inputs
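# Shape note (a hedged reading of the loops above, following the upstream
# BART-style pattern): each 4-tuple entry is zero-filled decoder self-attention
# key/value (decoder shape) plus cross-attention key/value (encoder shape),
# padded with 2-tuples for whichever stack has extra layers.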
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: PreTrainedTokenizer , _SCREAMING_SNAKE_CASE: int = -1 , _SCREAMING_SNAKE_CASE: int = -1 , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCamelCase_ , UpperCamelCase_ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
UpperCamelCase_ = seqlen + 2
UpperCamelCase_ , UpperCamelCase_ = self.num_layers
UpperCamelCase_ , UpperCamelCase_ = self.num_attention_heads
UpperCamelCase_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase_ = common_inputs["attention_mask"].dtype
UpperCamelCase_ = torch.cat(
[common_inputs["attention_mask"], torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )] , dim=1 )
UpperCamelCase_ = [
(torch.zeros(_SCREAMING_SNAKE_CASE ), torch.zeros(_SCREAMING_SNAKE_CASE )) for _ in range(_SCREAMING_SNAKE_CASE )
]
return common_inputs
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: PreTrainedTokenizer , _SCREAMING_SNAKE_CASE: int = -1 , _SCREAMING_SNAKE_CASE: int = -1 , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
UpperCamelCase_ = compute_effective_axis_dimension(
_SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase_ = tokenizer.num_special_tokens_to_add(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = compute_effective_axis_dimension(
_SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase_ = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCamelCase_ = dict(tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE ) )
return common_inputs
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: PreTrainedTokenizer , _SCREAMING_SNAKE_CASE: int = -1 , _SCREAMING_SNAKE_CASE: int = -1 , _SCREAMING_SNAKE_CASE: bool = False , _SCREAMING_SNAKE_CASE: Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
elif self.task == "causal-lm":
UpperCamelCase_ = self._generate_dummy_inputs_for_causal_lm(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
return common_inputs
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: int ) -> Any:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase_ = super()._flatten_past_key_values_(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
UpperCamelCase_ = super(_SCREAMING_SNAKE_CASE , self )._flatten_past_key_values_(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
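# --- Hedged illustration (not part of the source): the past_key_values layout the
# methods above construct -- one zero-filled (key, value) tensor pair per layer,
# shaped (batch, num_heads, past_sequence_length, head_dim). All sizes below are
# made-up example values.
import torch

_batch, _heads, _past_len, _head_dim = 2, 4, 10, 16
_shape = (_batch, _heads, _past_len, _head_dim)
_past_key_values = [(torch.zeros(_shape), torch.zeros(_shape)) for _ in range(6)]
assert _past_key_values[0][0].shape == _shape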
| 328 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self: List[str] , *,
_SCREAMING_SNAKE_CASE: int = 4 , _SCREAMING_SNAKE_CASE: int = 768 , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase_ = nn.Parameter(torch.zeros(_SCREAMING_SNAKE_CASE ) )
# parameters for additional clip time embeddings
UpperCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# parameters for encoder hidden states
UpperCamelCase_ = clip_extra_context_tokens
UpperCamelCase_ = nn.Linear(
_SCREAMING_SNAKE_CASE , self.clip_extra_context_tokens * cross_attention_dim )
UpperCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = nn.LayerNorm(_SCREAMING_SNAKE_CASE )
def lowercase ( self: Optional[int] , *, _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple ) -> str:
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
UpperCamelCase_ = image_embeddings.shape[0]
UpperCamelCase_ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
UpperCamelCase_ = classifier_free_guidance_embeddings.expand(
_SCREAMING_SNAKE_CASE , -1 )
UpperCamelCase_ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
UpperCamelCase_ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
UpperCamelCase_ = self.embedding_proj(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.clip_image_embeddings_project_to_time_embeddings(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
UpperCamelCase_ = self.clip_extra_context_tokens_proj(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = clip_extra_context_tokens.reshape(_SCREAMING_SNAKE_CASE , -1 , self.clip_extra_context_tokens )
UpperCamelCase_ = clip_extra_context_tokens.permute(0 , 2 , 1 )
UpperCamelCase_ = self.encoder_hidden_states_proj(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.text_encoder_hidden_states_norm(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
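# --- Hedged illustration (not from the source): the classifier-free-guidance step in
# the forward pass above, in isolation -- a learned unconditional embedding is expanded
# to the batch size and concatenated in front of the image embeddings, doubling the
# effective batch. Dimensions are example values.
import torch

_image_embeddings = torch.randn(2, 768)
_learned_uncond = torch.zeros(768)
_uncond = _learned_uncond.unsqueeze(0).expand(_image_embeddings.shape[0], -1)
_doubled = torch.cat([_uncond, _image_embeddings], dim=0)
assert _doubled.shape == (4, 768)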
| 328 | 1 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def lowerCAmelCase_ ( ) -> Union[str, Any]:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m" , "--pretrained_model_name_or_path" , type=str , default=None , required=True , help="Path to pretrained model or model identifier from huggingface.co/models." , )
    parser.add_argument(
        "-c" , "--caption" , type=str , default="robotic cat with wings" , help="Text used to generate images." , )
    parser.add_argument(
        "-n" , "--images_num" , type=int , default=4 , help="How many images to generate." , )
    parser.add_argument(
        "-s" , "--seed" , type=int , default=42 , help="Seed for random process." , )
    parser.add_argument(
        "-ci" , "--cuda_id" , type=int , default=0 , help="cuda_id." , )
    args = parser.parse_args()
    return args
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
if not len(UpperCamelCase_ ) == rows * cols:
raise ValueError("The specified number of rows and columns are not correct." )
    w , h = imgs[0].size
    grid = Image.new("RGB" , size=(cols * w, rows * h) )
    grid_w , grid_h = grid.size
    for i, img in enumerate(UpperCamelCase_ ):
        grid.paste(img , box=(i % cols * w, i // cols * h) )
return grid
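# --- Hedged, self-contained demo (not from the source) of the paste arithmetic above:
# tile i lands at box (i % cols * w, i // cols * h). Requires Pillow; colors arbitrary.
from PIL import Image as _Image

_tiles = [_Image.new("RGB", (64, 64), color=c) for c in ("red", "green", "blue", "white")]
_rows, _cols = 2, 2
_w, _h = _tiles[0].size
_grid = _Image.new("RGB", size=(_cols * _w, _rows * _h))
for _i, _img in enumerate(_tiles):
    _grid.paste(_img, box=(_i % _cols * _w, _i // _cols * _h))
assert _grid.size == (128, 128)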
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_="robotic cat with wings" , UpperCamelCase_=7.5 , UpperCamelCase_=50 , UpperCamelCase_=1 , UpperCamelCase_=42 , ) -> Any:
    generator = torch.Generator(pipeline.device ).manual_seed(UpperCamelCase_ )
    images = pipeline(
        UpperCamelCase_ , guidance_scale=UpperCamelCase_ , num_inference_steps=UpperCamelCase_ , generator=generator , num_images_per_prompt=UpperCamelCase_ , ).images
    _rows = int(math.sqrt(UpperCamelCase_ ) )
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
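# --- Hedged sketch (not from the source): the manual_seed call above is what makes
# generation reproducible -- the same seed yields the same initial noise.
import torch as _torch

_g1 = _torch.Generator().manual_seed(42)
_g2 = _torch.Generator().manual_seed(42)
assert _torch.equal(_torch.randn(2, 2, generator=_g1), _torch.randn(2, 2, generator=_g2))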
_UpperCAmelCase = parse_args()
# Load models and create wrapper for stable diffusion
_UpperCAmelCase = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
_UpperCAmelCase = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
_UpperCAmelCase = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
_UpperCAmelCase = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
_UpperCAmelCase = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
_UpperCAmelCase = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
_UpperCAmelCase = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
_UpperCAmelCase = unet.to(torch.device('cuda', args.cuda_id))
_UpperCAmelCase = pipeline.to(unet.device)
_UpperCAmelCase , _UpperCAmelCase = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
_UpperCAmelCase = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 328 |
from functools import lru_cache
def lowerCAmelCase_ ( UpperCamelCase_ ) -> set:
    n = UpperCamelCase_
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
return len(unique_prime_factors(UpperCamelCase_ ) )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> bool:
return len(set(UpperCamelCase_ ) ) in (0, 1)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> list:
    n = UpperCamelCase_
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def lowerCAmelCase_ ( UpperCamelCase_ = 4 ) -> int:
    results = run(UpperCamelCase_ )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
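# --- Hedged sanity check (not from the source): Project Euler 47's first triple of
# consecutive integers with three distinct prime factors starts at 644 = 2^2 * 7 * 23,
# so a factorizer like the one above should report {2, 7, 23}. Re-stated standalone:
def _upf_demo(n: int) -> set:
    i, factors = 2, set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors

assert _upf_demo(644) == {2, 7, 23}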
| 328 | 1 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_UpperCAmelCase = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_UpperCAmelCase = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_UpperCAmelCase = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
return float((preds == labels).mean() )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , fa_avg="binary" ) -> Tuple:
    UpperCamelCase_ = simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )
    UpperCamelCase_ = float(fa_score(y_true=UpperCamelCase_ , y_pred=UpperCamelCase_ , average=fa_avg ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
    question_map = {}
    for id_pred, label in zip(UpperCamelCase_ , UpperCamelCase_ ):
        question_id = F'''{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'''
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    fas , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        fa = fa_score(y_true=question_labels , y_pred=question_preds , average="macro" )
        fas.append(fa )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    fa_m = float(sum(fas ) / len(fas ) )
    em = sum(ems ) / len(ems )
    fa_a = float(fa_score(y_true=UpperCamelCase_ , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
def lowercase ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(self._get_feature_types() ) ,
            codebase_urls=[] ,
            reference_urls=[] ,
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
def lowercase ( self: List[Any] ) -> int:
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] ) -> Dict:
"""simple docstring"""
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
elif self.config_name == "cb":
return acc_and_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , fa_avg="macro" )
elif self.config_name == "record":
UpperCamelCase_ = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
UpperCamelCase_ = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 328 |
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
    matrix = UpperCamelCase_
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Note: reassigning the for-loop variable below does not repeat the row in
            # Python; a while loop would be needed to truly stay on the same row.
            row -= 1
    return rank
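# --- Hedged cross-check (not from the source): a 3x3 matrix whose third row is the
# sum of the first two has rank 2; numpy is assumed available for the reference value.
import numpy as _np

assert _np.linalg.matrix_rank(_np.array([[1, 2, 3], [4, 5, 6], [5, 7, 9]])) == 2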
if __name__ == "__main__":
import doctest
doctest.testmod()
| 328 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
_UpperCAmelCase = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '
def lowerCAmelCase_ ( UpperCamelCase_ ) -> List[str]:
    word = UpperCamelCase_
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
_UpperCAmelCase = {'facebook/s2t-wav2vec2-large-en-de': 1_0_2_4}
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : str = VOCAB_FILES_NAMES
_UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[Any]="<s>" , _SCREAMING_SNAKE_CASE: int="<pad>" , _SCREAMING_SNAKE_CASE: Optional[int]="</s>" , _SCREAMING_SNAKE_CASE: Dict="<unk>" , _SCREAMING_SNAKE_CASE: Tuple=False , _SCREAMING_SNAKE_CASE: Optional[Any]=None , **_SCREAMING_SNAKE_CASE: str , ) -> str:
"""simple docstring"""
super().__init__(
unk_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = do_lower_case
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as vocab_handle:
UpperCamelCase_ = json.load(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
UpperCamelCase_ = None
UpperCamelCase_ = None
else:
with open(_SCREAMING_SNAKE_CASE , encoding="utf-8" ) as merges_handle:
UpperCamelCase_ = merges_handle.read().split("\n" )[:-1]
UpperCamelCase_ = [tuple(merge.split()[:2] ) for merge in merges]
UpperCamelCase_ = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCamelCase_ = {}
@property
def lowercase ( self: Optional[Any] ) -> int:
"""simple docstring"""
return len(self.decoder )
def lowercase ( self: str ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
    def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Dict ) -> Tuple:
        """simple docstring"""
        token = _SCREAMING_SNAKE_CASE
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES , "" )
        word = word.replace(" " , BPE_TOKEN_VOCAB )
        self.cache[token] = word
        return word
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: int ) -> Union[str, Any]:
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding." )
if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
        return split_tokens
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: str ) -> int:
"""simple docstring"""
return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: int ) -> str:
"""simple docstring"""
        result = self.decoder.get(_SCREAMING_SNAKE_CASE , self.unk_token )
return result
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: List[str] ) -> str:
"""simple docstring"""
UpperCamelCase_ = " ".join(_SCREAMING_SNAKE_CASE )
# make sure @@ tokens are concatenated
UpperCamelCase_ = "".join(string.split(_SCREAMING_SNAKE_CASE ) )
return string
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
        vocab_file = os.path.join(
            _SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merges_file = os.path.join(
            _SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file , "w" , encoding="utf-8" ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
index += 1
return (vocab_file, merges_file)
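# --- Hedged, self-contained sketch (not from the source) of the single BPE merge step
# performed inside the loop above: fuse every adjacent ("l", "l") pair in the word.
_word = ("h", "e", "l", "l", "o</w>")
_first, _second = "l", "l"
_new_word, _i = [], 0
while _i < len(_word):
    if _i < len(_word) - 1 and _word[_i] == _first and _word[_i + 1] == _second:
        _new_word.append(_first + _second)
        _i += 2
    else:
        _new_word.append(_word[_i])
        _i += 1
assert tuple(_new_word) == ("h", "e", "ll", "o</w>")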
| 328 |
import math
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
    if 0 not in (x, y):
        # Compare powers through logarithms, using log10(x^y) = y * log10(x).
        return y * math.logaa(UpperCamelCase_ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
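# --- Hedged worked example (not from the source): comparing 2**100 with 100**2 via
# base-10 logs, exactly the relation the function above relies on.
import math as _math

assert 100 * _math.log10(2) > 2 * _math.log10(100)  # ~30.10 vs 4.0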
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
_UpperCAmelCase = 'Enter the base and the power separated by a comma: '
    _UpperCAmelCase , _UpperCAmelCase = map(int, input(prompt).split(','))  # first pair: (xa, ya)
    _UpperCAmelCase , _UpperCAmelCase = map(int, input(prompt).split(','))  # second pair: (xb, yb)
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    _UpperCAmelCase = res(xa, ya)
    _UpperCAmelCase = res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print('Largest number is', xa, '^', ya)
    elif resb > resa:
        print('Largest number is', xb, '^', yb)
    else:
        print('Both are equal')
| 328 | 1 |
def lowerCAmelCase_ ( UpperCamelCase_ = 50 ) -> int:
    length = UpperCamelCase_
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
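# --- Hedged standalone check (not from the source): the Project Euler 114 statement
# gives exactly 17 tilings for a row of length 7; the same DP re-stated here.
def _ways_demo(length: int) -> int:
    ways = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways[row_length] += ways[row_length - block_start - block_length - 1]
            ways[row_length] += 1
    return ways[length]

assert _ways_demo(7) == 17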
if __name__ == "__main__":
print(f'''{solution() = }''')
| 328 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
_UpperCAmelCase = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> List[Any]:
if isinstance(UpperCamelCase_ , torch.Tensor ):
return image
elif isinstance(UpperCamelCase_ , PIL.Image.Image ):
UpperCamelCase_ = [image]
UpperCamelCase_ = [trans(img.convert("RGB" ) ) for img in image]
UpperCamelCase_ = torch.stack(UpperCamelCase_ )
return image
class _UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self: List[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Dict ) -> str:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCamelCase_ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Dict ) -> Optional[Any]:
"""simple docstring"""
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' )
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] ) -> int:
"""simple docstring"""
UpperCamelCase_ = min(int(num_inference_steps * strength ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int]=None ) -> List[Any]:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_SCREAMING_SNAKE_CASE )}''' )
UpperCamelCase_ = image.to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(_SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCamelCase_ = init_latents.shape
UpperCamelCase_ = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
# get latents
print("add noise to latents at timestep" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.scheduler.add_noise(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = init_latents
return latents
@torch.no_grad()
def __call__( self: Dict , _SCREAMING_SNAKE_CASE: Union[torch.FloatTensor, PIL.Image.Image] = None , _SCREAMING_SNAKE_CASE: float = 0.8 , _SCREAMING_SNAKE_CASE: int = 1 , _SCREAMING_SNAKE_CASE: Optional[Union[torch.Generator, List[torch.Generator]]] = None , _SCREAMING_SNAKE_CASE: float = 0.0 , _SCREAMING_SNAKE_CASE: int = 50 , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[str] = "pil" , _SCREAMING_SNAKE_CASE: bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
self.check_inputs(_SCREAMING_SNAKE_CASE )
# 2. Preprocess image
UpperCamelCase_ = preprocess(_SCREAMING_SNAKE_CASE )
# 3. set timesteps
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=self.device )
UpperCamelCase_ , UpperCamelCase_ = self.get_timesteps(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.device )
UpperCamelCase_ = timesteps[:1].repeat(_SCREAMING_SNAKE_CASE )
# 4. Prepare latent variables
UpperCamelCase_ = self.prepare_latents(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.unet.dtype , self.device , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = latents
# 5. Denoising loop
for t in self.progress_bar(_SCREAMING_SNAKE_CASE ):
# 1. predict noise model_output
UpperCamelCase_ = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCamelCase_ = self.scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , use_clipped_model_output=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample
UpperCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase_ = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
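# --- Hedged numeric sketch (not from the source) of get_timesteps above: with 50
# scheduler steps and strength 0.8, denoising starts 40 steps from the end.
_num_inference_steps, _strength = 50, 0.8
_init_timestep = min(int(_num_inference_steps * _strength), _num_inference_steps)
_t_start = max(_num_inference_steps - _init_timestep, 0)
assert (_init_timestep, _t_start) == (40, 10)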
| 328 | 1 |
from functools import lru_cache
def lowerCAmelCase_ ( UpperCamelCase_ ) -> set:
    n = UpperCamelCase_
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
return len(unique_prime_factors(UpperCamelCase_ ) )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> bool:
return len(set(UpperCamelCase_ ) ) in (0, 1)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> list:
    n = UpperCamelCase_
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def lowerCAmelCase_ ( UpperCamelCase_ = 4 ) -> int:
    results = run(UpperCamelCase_ )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
| 328 |
import re
from filelock import FileLock
try:
import nltk
_UpperCAmelCase = True
except (ImportError, ModuleNotFoundError):
_UpperCAmelCase = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> str:
re.sub("<n>" , "" , UpperCamelCase_ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCamelCase_ ) )
| 328 | 1 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
class _UpperCamelCase :
_UpperCamelCase : str
_UpperCamelCase : str = None
@staticmethod
def lowercase ( ) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , **_SCREAMING_SNAKE_CASE: Optional[int] ) -> Tuple:
"""simple docstring"""
raise NotImplementedError
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> str:
"""simple docstring"""
raise NotImplementedError
def lowercase ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
if not self.is_available():
raise RuntimeError(
f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def lowercase ( cls: Union[str, Any] ) -> int:
"""simple docstring"""
return f'''`pip install {cls.pip_package or cls.name}`'''
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : Any = '''optuna'''
@staticmethod
def lowercase ( ) -> str:
"""simple docstring"""
return is_optuna_available()
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , **_SCREAMING_SNAKE_CASE: int ) -> List[str]:
"""simple docstring"""
return run_hp_search_optuna(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: int ) -> Optional[Any]:
"""simple docstring"""
return default_hp_space_optuna(_SCREAMING_SNAKE_CASE )
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : Tuple = '''ray'''
_UpperCamelCase : Union[str, Any] = '''\'ray[tune]\''''
@staticmethod
def lowercase ( ) -> Dict:
"""simple docstring"""
return is_ray_available()
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , **_SCREAMING_SNAKE_CASE: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return run_hp_search_ray(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any] ) -> Tuple:
"""simple docstring"""
return default_hp_space_ray(_SCREAMING_SNAKE_CASE )
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : List[str] = '''sigopt'''
@staticmethod
def lowercase ( ) -> Dict:
"""simple docstring"""
return is_sigopt_available()
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , **_SCREAMING_SNAKE_CASE: Optional[Any] ) -> int:
"""simple docstring"""
return run_hp_search_sigopt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: Any ) -> Optional[int]:
"""simple docstring"""
return default_hp_space_sigopt(_SCREAMING_SNAKE_CASE )
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : Optional[Any] = '''wandb'''
@staticmethod
def lowercase ( ) -> Union[str, Any]:
"""simple docstring"""
return is_wandb_available()
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , **_SCREAMING_SNAKE_CASE: Any ) -> Dict:
"""simple docstring"""
return run_hp_search_wandb(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: str ) -> Any:
"""simple docstring"""
return default_hp_space_wandb(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def lowerCAmelCase_ ( ) -> str:
UpperCamelCase_ = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(UpperCamelCase_ ) > 0:
UpperCamelCase_ = available_backends[0].name
if len(UpperCamelCase_ ) > 1:
logger.info(
F'''{len(UpperCamelCase_ )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
F''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
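# --- Hedged, standalone illustration (not from the source) of the registry pattern
# above: map backend names to classes and pick the first whose is_available() is True.
class _DummyBackend:
    name = "dummy"

    @staticmethod
    def is_available() -> bool:
        return True

_REGISTRY = {_DummyBackend.name: _DummyBackend}
_available = [b for b in _REGISTRY.values() if b.is_available()]
assert _available and _available[0].name == "dummy"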
| 328 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DiTPipeline
_UpperCamelCase : Any = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Dict = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_UpperCamelCase : Optional[int] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Dict = False
def lowercase ( self: str ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase_ = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = AutoencoderKL()
UpperCamelCase_ = DDIMScheduler()
UpperCamelCase_ = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[str]=0 ) -> Dict:
"""simple docstring"""
if str(_SCREAMING_SNAKE_CASE ).startswith("mps" ):
UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowercase ( self: Any ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = "cpu"
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = pipe(**_SCREAMING_SNAKE_CASE ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
UpperCamelCase_ = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
UpperCamelCase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 )
def lowercase ( self: Optional[int] ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase ( self: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class _UpperCamelCase ( unittest.TestCase ):
def lowercase ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
UpperCamelCase_ = ["vase", "umbrella", "white shark", "white wolf"]
UpperCamelCase_ = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def lowercase ( self: int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
UpperCamelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
UpperCamelCase_ = ["vase", "umbrella"]
UpperCamelCase_ = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
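# --- Hedged sketch (not from the source) of the slice check used in the tests above:
# compare a 3x3 corner slice of a generated image batch against stored values.
import numpy as _np

_image = _np.zeros((1, 16, 16, 3))
_slice = _image[0, -3:, -3:, -1]
assert _slice.shape == (3, 3) and float(_np.abs(_slice.flatten()).max()) == 0.0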
| 328 | 1 |
import pytest
_UpperCAmelCase = '__dummy_dataset1__'
_UpperCAmelCase = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def lowerCAmelCase_ ( ) -> Union[str, Any]:
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def lowerCAmelCase_ ( ) -> Dict:
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
UpperCamelCase_ = dataset_loading_script_name
UpperCamelCase_ = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=UpperCamelCase_ )
UpperCamelCase_ = script_dir / F'''{script_name}.py'''
with open(UpperCamelCase_ , "w" ) as f:
f.write(UpperCamelCase_ )
return str(UpperCamelCase_ )
| 328 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _UpperCamelCase :
def __init__( self: str ) -> Any:
"""simple docstring"""
UpperCamelCase_ = ""
UpperCamelCase_ = ""
UpperCamelCase_ = []
UpperCamelCase_ = 0
UpperCamelCase_ = 256
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = 0
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Dict ) -> str:
"""simple docstring"""
        self.img = cva.imread(_SCREAMING_SNAKE_CASE , 0 )
        self.original_image = copy.deepcopy(self.img )
        x , _ , _ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last )
            self.rem = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(self.rem )
            self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg" , self.img )
def lowercase ( self: Any ) -> Optional[Any]:
"""simple docstring"""
plt.hist(self.img.ravel() , 256 , [0, 256] )
def lowercase ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
    _UpperCAmelCase = os.path.join(os.path.dirname(__file__), 'image_data/input.jpg')
_UpperCAmelCase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
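# --- Hedged numeric sketch (not from the source) of the mapping built in stretch():
# cumulative probabilities s_k scaled by (L - 1) and rounded give the new gray levels.
import numpy as _np

_hist = _np.array([10, 20, 30, 40], dtype=float)  # toy 4-bin histogram
_sk = _np.cumsum(_hist / _hist.sum())             # s_k = running sum of p(r_k)
_mapped = _np.rint((4 - 1) * _sk).astype(int)     # L = 4 gray levels
assert _mapped.tolist() == [0, 1, 2, 3]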
| 328 | 1 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _UpperCamelCase :
@staticmethod
def lowercase ( *_SCREAMING_SNAKE_CASE: Optional[Any] , **_SCREAMING_SNAKE_CASE: Tuple ) -> Tuple:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
_UpperCamelCase : Optional[int] = MODEL_FOR_OBJECT_DETECTION_MAPPING
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = ObjectDetectionPipeline(model=__a , image_processor=__a )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] ) -> str:
"""simple docstring"""
UpperCamelCase_ = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 )
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
import datasets
UpperCamelCase_ = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
UpperCamelCase_ = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
UpperCamelCase_ = object_detector(__a , threshold=0.0 )
self.assertEqual(len(__a ) , len(__a ) )
for outputs in batch_outputs:
self.assertGreater(len(__a ) , 0 )
for detected_object in outputs:
self.assertEqual(
__a , {
"score": ANY(__a ),
"label": ANY(__a ),
"box": {"xmin": ANY(__a ), "ymin": ANY(__a ), "xmax": ANY(__a ), "ymax": ANY(__a )},
} , )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
def lowercase ( self: Any ) -> List[str]:
"""simple docstring"""
pass
@require_torch
def lowercase ( self: List[str] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = 'hf-internal-testing/tiny-detr-mobilenetsv3'
UpperCamelCase_ = AutoModelForObjectDetection.from_pretrained(__a )
UpperCamelCase_ = AutoFeatureExtractor.from_pretrained(__a )
UpperCamelCase_ = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
UpperCamelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
] , )
UpperCamelCase_ = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
[
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
{"score": 0.33_76, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
],
] , )
@require_torch
@slow
def lowercase ( self: Union[str, Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ = 'facebook/detr-resnet-50'
UpperCamelCase_ = AutoModelForObjectDetection.from_pretrained(__a )
UpperCamelCase_ = AutoFeatureExtractor.from_pretrained(__a )
UpperCamelCase_ = ObjectDetectionPipeline(model=__a , feature_extractor=__a )
UpperCamelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
UpperCamelCase_ = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def lowercase ( self: Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = 'facebook/detr-resnet-50'
UpperCamelCase_ = pipeline("object-detection" , model=__a )
UpperCamelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
UpperCamelCase_ = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
[
{"score": 0.99_82, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
{"score": 0.99_60, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
{"score": 0.99_55, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
],
] , )
@require_torch
@slow
def lowercase ( self: Tuple ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = 0.99_85
UpperCamelCase_ = 'facebook/detr-resnet-50'
UpperCamelCase_ = pipeline("object-detection" , model=__a )
UpperCamelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__a )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_88, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
{"score": 0.99_87, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def lowercase ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = 'Narsil/layoutlmv3-finetuned-funsd'
UpperCamelCase_ = 0.99_93
UpperCamelCase_ = pipeline("object-detection" , model=__a , threshold=__a )
UpperCamelCase_ = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(__a , decimals=4 ) , [
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
{"score": 0.99_93, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
] , )
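# --- Hedged sketch (not from the source): the box format asserted throughout the
# tests above -- corner coordinates with xmin <= xmax and ymin <= ymax.
_box = {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}
assert _box["xmin"] <= _box["xmax"] and _box["ymin"] <= _box["ymax"]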
| 350 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_UpperCAmelCase = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_DESCRIPTION = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_KWARGS_DESCRIPTION = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy ( preds , labels ) -> Optional[Any]:
    return float((preds == labels).mean() )
def acc_and_fa ( preds , labels , fa_avg="binary" ) -> Tuple:
    acc = simple_accuracy(preds , labels )
    fa = float(fa_score(y_true=labels , y_pred=preds , average=fa_avg ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def evaluate_multirc ( ids_preds , labels ) -> List[Any]:
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f'''{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'''
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    fas, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels )
        fa = fa_score(y_true=question_labels , y_pred=question_preds , average="macro" )
        fas.append(fa )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    fa_m = float(sum(fas ) / len(fas ) )
    em = sum(ems ) / len(ems )
    fa_a = float(fa_score(y_true=labels , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue ( datasets.Metric ):
    def _info ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
    def _get_feature_types ( self: List[Any] ) -> int:
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
    def _compute ( self: Tuple , predictions: Optional[Any] , references: List[str] ) -> Dict:
        """simple docstring"""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_fa(predictions , references , fa_avg="macro" )
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 328 | 0 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_=1 ) -> List[str]:
if n_shave_prefix_segments >= 0:
return ".".join(path.split("." )[n_shave_prefix_segments:] )
else:
return ".".join(path.split("." )[:n_shave_prefix_segments] )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_=0 ) -> Union[str, Any]:
UpperCamelCase_ = []
for old_item in old_list:
UpperCamelCase_ = old_item.replace("in_layers.0" , "norm1" )
UpperCamelCase_ = new_item.replace("in_layers.2" , "conv1" )
UpperCamelCase_ = new_item.replace("out_layers.0" , "norm2" )
UpperCamelCase_ = new_item.replace("out_layers.3" , "conv2" )
UpperCamelCase_ = new_item.replace("emb_layers.1" , "time_emb_proj" )
UpperCamelCase_ = new_item.replace("skip_connection" , "conv_shortcut" )
UpperCamelCase_ = shave_segments(lowercase__ , n_shave_prefix_segments=lowercase__ )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_=0 ) -> Optional[Any]:
UpperCamelCase_ = []
for old_item in old_list:
UpperCamelCase_ = old_item
UpperCamelCase_ = new_item.replace("norm.weight" , "group_norm.weight" )
UpperCamelCase_ = new_item.replace("norm.bias" , "group_norm.bias" )
UpperCamelCase_ = new_item.replace("proj_out.weight" , "proj_attn.weight" )
UpperCamelCase_ = new_item.replace("proj_out.bias" , "proj_attn.bias" )
UpperCamelCase_ = shave_segments(lowercase__ , n_shave_prefix_segments=lowercase__ )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None ) -> Optional[int]:
assert isinstance(lowercase__ , lowercase__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCamelCase_ = old_checkpoint[path]
UpperCamelCase_ = old_tensor.shape[0] // 3
UpperCamelCase_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
UpperCamelCase_ = old_tensor.shape[0] // config["""num_head_channels"""] // 3
UpperCamelCase_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
UpperCamelCase_ = old_tensor.split(channels // num_heads , dim=1 )
UpperCamelCase_ = query.reshape(lowercase__ )
UpperCamelCase_ = key.reshape(lowercase__ )
UpperCamelCase_ = value.reshape(lowercase__ )
for path in paths:
UpperCamelCase_ = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCamelCase_ = new_path.replace("middle_block.0" , "mid_block.resnets.0" )
UpperCamelCase_ = new_path.replace("middle_block.1" , "mid_block.attentions.0" )
UpperCamelCase_ = new_path.replace("middle_block.2" , "mid_block.resnets.1" )
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCamelCase_ = new_path.replace(replacement["old"] , replacement["new"] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCamelCase_ = old_checkpoint[path["""old"""]][:, :, 0]
else:
UpperCamelCase_ = old_checkpoint[path["""old"""]]
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> int:
UpperCamelCase_ = {}
UpperCamelCase_ = checkpoint["""time_embed.0.weight"""]
UpperCamelCase_ = checkpoint["""time_embed.0.bias"""]
UpperCamelCase_ = checkpoint["""time_embed.2.weight"""]
UpperCamelCase_ = checkpoint["""time_embed.2.bias"""]
UpperCamelCase_ = checkpoint["""input_blocks.0.0.weight"""]
UpperCamelCase_ = checkpoint["""input_blocks.0.0.bias"""]
UpperCamelCase_ = checkpoint["""out.0.weight"""]
UpperCamelCase_ = checkpoint["""out.0.bias"""]
UpperCamelCase_ = checkpoint["""out.2.weight"""]
UpperCamelCase_ = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
UpperCamelCase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
UpperCamelCase_ = {
layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key]
for layer_id in range(lowercase__ )
}
# Retrieves the keys for the middle blocks only
UpperCamelCase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
UpperCamelCase_ = {
layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key]
for layer_id in range(lowercase__ )
}
# Retrieves the keys for the output blocks only
UpperCamelCase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
UpperCamelCase_ = {
layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key]
for layer_id in range(lowercase__ )
}
for i in range(1 , lowercase__ ):
UpperCamelCase_ = (i - 1) // (config["""num_res_blocks"""] + 1)
UpperCamelCase_ = (i - 1) % (config["""num_res_blocks"""] + 1)
UpperCamelCase_ = [key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key]
UpperCamelCase_ = [key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key]
if F'''input_blocks.{i}.0.op.weight''' in checkpoint:
UpperCamelCase_ = checkpoint[
F'''input_blocks.{i}.0.op.weight'''
]
UpperCamelCase_ = checkpoint[
F'''input_blocks.{i}.0.op.bias'''
]
continue
UpperCamelCase_ = renew_resnet_paths(lowercase__ )
UpperCamelCase_ = {"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''}
UpperCamelCase_ = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path, resnet_op] , config=lowercase__ )
if len(lowercase__ ):
UpperCamelCase_ = renew_attention_paths(lowercase__ )
UpperCamelCase_ = {
"""old""": F'''input_blocks.{i}.1''',
"""new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
UpperCamelCase_ = {
F'''input_blocks.{i}.1.qkv.bias''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''input_blocks.{i}.1.qkv.weight''': {
"""key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path] , attention_paths_to_split=lowercase__ , config=lowercase__ , )
UpperCamelCase_ = middle_blocks[0]
UpperCamelCase_ = middle_blocks[1]
UpperCamelCase_ = middle_blocks[2]
UpperCamelCase_ = renew_resnet_paths(lowercase__ )
assign_to_checkpoint(lowercase__ , lowercase__ , lowercase__ , config=lowercase__ )
UpperCamelCase_ = renew_resnet_paths(lowercase__ )
assign_to_checkpoint(lowercase__ , lowercase__ , lowercase__ , config=lowercase__ )
UpperCamelCase_ = renew_attention_paths(lowercase__ )
UpperCamelCase_ = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
lowercase__ , lowercase__ , lowercase__ , attention_paths_to_split=lowercase__ , config=lowercase__ )
for i in range(lowercase__ ):
UpperCamelCase_ = i // (config["""num_res_blocks"""] + 1)
UpperCamelCase_ = i % (config["""num_res_blocks"""] + 1)
UpperCamelCase_ = [shave_segments(lowercase__ , 2 ) for name in output_blocks[i]]
UpperCamelCase_ = {}
for layer in output_block_layers:
UpperCamelCase_ = layer.split("." )[0], shave_segments(lowercase__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(lowercase__ )
else:
UpperCamelCase_ = [layer_name]
if len(lowercase__ ) > 1:
UpperCamelCase_ = [key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key]
UpperCamelCase_ = [key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key]
UpperCamelCase_ = renew_resnet_paths(lowercase__ )
UpperCamelCase_ = renew_resnet_paths(lowercase__ )
UpperCamelCase_ = {"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''}
assign_to_checkpoint(lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path] , config=lowercase__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCamelCase_ = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
UpperCamelCase_ = checkpoint[
F'''output_blocks.{i}.{index}.conv.weight'''
]
UpperCamelCase_ = checkpoint[
F'''output_blocks.{i}.{index}.conv.bias'''
]
# Clear attentions as they have been attributed above.
if len(lowercase__ ) == 2:
UpperCamelCase_ = []
if len(lowercase__ ):
UpperCamelCase_ = renew_attention_paths(lowercase__ )
UpperCamelCase_ = {
"""old""": F'''output_blocks.{i}.1''',
"""new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''',
}
UpperCamelCase_ = {
F'''output_blocks.{i}.1.qkv.bias''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''',
},
F'''output_blocks.{i}.1.qkv.weight''': {
"""key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''',
"""query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''',
"""value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''',
},
}
assign_to_checkpoint(
lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=lowercase__ , )
else:
UpperCamelCase_ = renew_resnet_paths(lowercase__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCamelCase_ = """.""".join(["output_blocks", str(lowercase__ ), path["old"]] )
UpperCamelCase_ = """.""".join(["up_blocks", str(lowercase__ ), "resnets", str(lowercase__ ), path["new"]] )
UpperCamelCase_ = checkpoint[old_path]
return new_checkpoint
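# Example invocation (the script file name is a placeholder; the flags are the
# argparse options defined below):
#   python convert_ldm_checkpoint.py --checkpoint_path model.ckpt \
#       --config_file config.json --dump_path ./converted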
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_UpperCAmelCase = json.loads(f.read())
_UpperCAmelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_UpperCAmelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_UpperCAmelCase = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
_UpperCAmelCase = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
_UpperCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 351 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig ( PretrainedConfig ):
    model_type : str = '''mgp-str'''
    def __init__( self: Optional[Any] , image_size: Optional[int]=[32, 128] , patch_size: Tuple=4 , num_channels: Optional[Any]=3 , max_token_length: Optional[int]=27 , num_character_labels: Tuple=38 , num_bpe_labels: Tuple=50257 , num_wordpiece_labels: List[Any]=30522 , hidden_size: Optional[Any]=768 , num_hidden_layers: Dict=12 , num_attention_heads: List[str]=12 , mlp_ratio: Dict=4.0 , qkv_bias: int=True , distilled: Tuple=False , layer_norm_eps: Tuple=1e-5 , drop_rate: Optional[Any]=0.0 , attn_drop_rate: Tuple=0.0 , drop_path_rate: List[Any]=0.0 , output_aa_attentions: List[str]=False , initializer_range: int=0.02 , **kwargs: Any , ) -> str:
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range
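# Usage sketch: a default config, overriding two of the fields restored above.
#   config = MgpstrConfig(max_token_length=27, hidden_size=768)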
| 328 | 0 |
"""simple docstring"""
def set_bit ( number: int , position: int ) -> int:
    return number | (1 << position)
def clear_bit ( number: int , position: int ) -> int:
    return number & ~(1 << position)
def flip_bit ( number: int , position: int ) -> int:
    return number ^ (1 << position)
def is_bit_set ( number: int , position: int ) -> bool:
    return ((number >> position) & 1) == 1
def get_bit ( number: int , position: int ) -> int:
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
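    # Quick demonstration with 0b1010 (decimal 10):
    assert set_bit(0b1010 , 0 ) == 0b1011
    assert clear_bit(0b1010 , 1 ) == 0b1000
    assert flip_bit(0b1010 , 3 ) == 0b0010
    assert is_bit_set(0b1010 , 3 ) is True
    assert get_bit(0b1010 , 0 ) == 0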
| 352 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
_UpperCamelCase : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_UpperCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_UpperCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_UpperCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_UpperCamelCase : bool = field(default=lowerCAmelCase_ , metadata={'''help''': '''Whether tp freeze the encoder.'''} )
_UpperCamelCase : bool = field(default=lowerCAmelCase_ , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class _UpperCamelCase :
_UpperCamelCase : str = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
_UpperCamelCase : Optional[str] = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
_UpperCamelCase : Optional[int] = field(
default=1_0_2_4 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCamelCase : Optional[int] = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCamelCase : Optional[int] = field(
default=1_4_2 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
_UpperCamelCase : Optional[int] = field(
default=1_4_2 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
_UpperCamelCase : Optional[str] = field(default=lowerCAmelCase_ , metadata={'''help''': '''Source language id for translation.'''} )
_UpperCamelCase : Optional[str] = field(default=lowerCAmelCase_ , metadata={'''help''': '''Target language id for translation.'''} )
_UpperCamelCase : Optional[int] = field(default=lowerCAmelCase_ , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
_UpperCamelCase : bool = field(
default=lowerCAmelCase_ , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def handle_metrics ( split , metrics , output_dir ) -> Optional[int]:
logger.info(F'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(F''' {key} = {metrics[key]}''' )
save_json(UpperCamelCase_ , os.path.join(UpperCamelCase_ , F'''{split}_results.json''' ) )
def main ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses()
check_output_dir(UpperCamelCase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , UpperCamelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase_ = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
assert hasattr(UpperCamelCase_ , UpperCamelCase_ ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(UpperCamelCase_ , UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
UpperCamelCase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(UpperCamelCase_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
UpperCamelCase_ = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(UpperCamelCase_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase_ = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
UpperCamelCase_ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(UpperCamelCase_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
UpperCamelCase_ = SeqaSeqDataset
# Get datasets
UpperCamelCase_ = (
dataset_class(
UpperCamelCase_ , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
UpperCamelCase_ = (
dataset_class(
UpperCamelCase_ , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
UpperCamelCase_ = (
dataset_class(
UpperCamelCase_ , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
UpperCamelCase_ = (
build_compute_metrics_fn(data_args.task , UpperCamelCase_ ) if training_args.predict_with_generate else None
)
UpperCamelCase_ = SeqaSeqTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , data_args=UpperCamelCase_ , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , data_collator=SeqaSeqDataCollator(
UpperCamelCase_ , UpperCamelCase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
UpperCamelCase_ = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
UpperCamelCase_ = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
UpperCamelCase_ = train_result.metrics
UpperCamelCase_ = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , UpperCamelCase_ , training_args.output_dir )
all_metrics.update(UpperCamelCase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCamelCase_ = trainer.evaluate(metric_key_prefix="val" )
UpperCamelCase_ = data_args.n_val
UpperCamelCase_ = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , UpperCamelCase_ , training_args.output_dir )
all_metrics.update(UpperCamelCase_ )
if training_args.do_predict:
logger.info("*** Predict ***" )
UpperCamelCase_ = trainer.predict(test_dataset=UpperCamelCase_ , metric_key_prefix="test" )
UpperCamelCase_ = test_output.metrics
UpperCamelCase_ = data_args.n_test
if trainer.is_world_process_zero():
UpperCamelCase_ = round(metrics["test_loss"] , 4 )
handle_metrics("test" , UpperCamelCase_ , training_args.output_dir )
all_metrics.update(UpperCamelCase_ )
if training_args.predict_with_generate:
UpperCamelCase_ = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
UpperCamelCase_ = lmap(str.strip , UpperCamelCase_ )
write_txt_file(UpperCamelCase_ , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(UpperCamelCase_ , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def _mp_fn ( index ) -> Optional[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
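# Example invocation (model name and paths are placeholders; the flags come
# from the dataclasses above plus SeqaSeqTrainingArguments):
#   python finetune_trainer.py --model_name_or_path t5-small --data_dir ./data \
#       --output_dir ./out --do_train --do_eval --task summarization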
| 328 | 0 |
def solution ( limit = 1000000 ) -> int:
    primes = set(range(3 , limit + 1 , 2 ) )  # include limit itself in case it is prime
    primes.add(2 )
    for p in range(3 , limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit + 1 , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
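    # Sanity check: phi(2)+...+phi(8) = 1+2+2+4+2+6+4 = 21 reduced proper
    # fractions for d <= 8, the worked example in Project Euler problem 72.
    assert solution(8 ) == 21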
| 353 |
def hamming ( n_element: int ) -> list:
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError("n_element should be a positive number" )
        raise my_error
    hamming_list = [1]
    (i, j, k) = (0, 0, 0)
    index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
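# Worked example: the first ten Hamming numbers.
assert hamming(10 ) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]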
if __name__ == "__main__":
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = hamming(int(n))
print('-----------------------------------------------------')
print(f'''The list with nth numbers is: {hamming_numbers}''')
print('-----------------------------------------------------')
| 328 | 0 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = False
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
_UpperCAmelCase = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
_UpperCAmelCase = '' if has_file(args.repo_path, 'config.json') else 'unet'
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
_UpperCAmelCase = reader.read()
_UpperCAmelCase = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
_UpperCAmelCase = UNetaDModel(**config)
else:
_UpperCAmelCase = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
_UpperCAmelCase = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
_UpperCAmelCase = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
_UpperCAmelCase = config[key]
del config[key]
_UpperCAmelCase = [k.replace('UNetRes', '') for k in config['down_block_types']]
_UpperCAmelCase = [k.replace('UNetRes', '') for k in config['up_block_types']]
if do_only_weights:
_UpperCAmelCase = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
_UpperCAmelCase = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
_UpperCAmelCase = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
_UpperCAmelCase = param_value
_UpperCAmelCase = True
if not has_changed:
_UpperCAmelCase = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 354 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImgaImgSuperResolutionPipelineFastTests ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def get_dummy_components ( self: List[str] ) -> Any:
"""simple docstring"""
return self._get_superresolution_dummy_components()
    def get_dummy_inputs ( self: Any , device: Any , seed: Optional[int]=0 ) -> List[Any]:
        """simple docstring"""
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
UpperCamelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass ( self: Any ) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
    def test_save_load_optional_components ( self: int ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def test_save_load_floataa ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
    def test_attention_slicing_forward_pass ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def test_save_load_local ( self: Dict ) -> Any:
"""simple docstring"""
self._test_save_load_local()
    def test_inference_batch_single_identical ( self: Any ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 328 | 0 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHAaaa :
    def __init__( self: List[str] , data: bytes ) -> Union[str, Any]:
        """simple docstring"""
        self.data = data
        # Initialize hash values
        self.hashes = [
0X6A09E667,
0XBB67AE85,
0X3C6EF372,
0XA54FF53A,
0X510E527F,
0X9B05688C,
0X1F83D9AB,
0X5BE0CD19,
]
# Initialize round constants
        self.round_constants = [
0X428A2F98,
0X71374491,
0XB5C0FBCF,
0XE9B5DBA5,
0X3956C25B,
0X59F111F1,
0X923F82A4,
0XAB1C5ED5,
0XD807AA98,
0X12835B01,
0X243185BE,
0X550C7DC3,
0X72BE5D74,
0X80DEB1FE,
0X9BDC06A7,
0XC19BF174,
0XE49B69C1,
0XEFBE4786,
0X0FC19DC6,
0X240CA1CC,
0X2DE92C6F,
0X4A7484AA,
0X5CB0A9DC,
0X76F988DA,
0X983E5152,
0XA831C66D,
0XB00327C8,
0XBF597FC7,
0XC6E00BF3,
0XD5A79147,
0X06CA6351,
0X14292967,
0X27B70A85,
0X2E1B2138,
0X4D2C6DFC,
0X53380D13,
0X650A7354,
0X766A0ABB,
0X81C2C92E,
0X92722C85,
0XA2BFE8A1,
0XA81A664B,
0XC24B8B70,
0XC76C51A3,
0XD192E819,
0XD6990624,
0XF40E3585,
0X106AA070,
0X19A4C116,
0X1E376C08,
0X2748774C,
0X34B0BCB5,
0X391C0CB3,
0X4ED8AA4A,
0X5B9CCA4F,
0X682E6FF3,
0X748F82EE,
0X78A5636F,
0X84C87814,
0X8CC70208,
0X90BEFFFA,
0XA4506CEB,
0XBEF9A3F7,
0XC67178F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing ( data: bytes ) -> Optional[Any]:
        """simple docstring"""
        padding = B'\x80' + (B'\x00' * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack(">Q" , (len(data ) * 8) )
        return data + padding + big_endian_integer
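        # Padding sketch: an empty message gets 0x80, 55 zero bytes and the
        # 8-byte length field -- exactly one full 64-byte block.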
    def final_hash ( self: Optional[Any] ) -> List[Any]:
"""simple docstring"""
        self.blocks = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L" , block ) )
# add 48 0-ed integers
words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15] , 7 )
                        ^ self.ror(words[index - 15] , 18 )
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2] , 17 )
                        ^ self.ror(words[index - 2] , 19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0X100000000
                # Compression
                s1 = self.ror(e , 6 ) ^ self.ror(e , 11 ) ^ self.ror(e , 25 )
                ch = (e & f) ^ ((~e & 0XFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0X100000000
                s0 = self.ror(a , 2 ) ^ self.ror(a , 13 ) ^ self.ror(a , 22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0X100000000
                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0X100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0X100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
# Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0X100000000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror ( self: Union[str, Any] , value: int , rotations: int ) -> List[Any]:
        """simple docstring"""
        return 0XFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
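# Known-answer check: SHAaaa(b"abc").hash should equal
# "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad".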
class SHAaaaHashTest ( unittest.TestCase ):
    def test_match_hashes ( self: Dict ) -> Union[str, Any]:
        """simple docstring"""
        import hashlib
        data = bytes("Test String" , "utf-8" )
        self.assertEqual(SHAaaa(data ).hash , hashlib.sha256(data ).hexdigest() )
def main ( ) -> Optional[Any]:
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
    parser.add_argument(
        "-f" , "--file" , dest="input_file" , help="Hash contents of a file" )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , "rb" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , "utf-8" )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
| 355 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}
def extract_user_profile ( script ) -> dict:
    data = script.contents[0]
    info = json.loads(data[data.find("{\"config\"" ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser :
    def __init__( self: Optional[Any] , username: str ) -> str:
        """simple docstring"""
        self.url = f'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()
    def get_json ( self: Union[str, Any] ) -> dict:
        """simple docstring"""
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self: Tuple ) -> str:
"""simple docstring"""
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self: List[Any] ) -> str:
"""simple docstring"""
return f'''{self.fullname} ({self.username}) is {self.biography}'''
@property
    def username ( self: List[str] ) -> str:
"""simple docstring"""
return self.user_data["username"]
@property
    def fullname ( self: int ) -> str:
"""simple docstring"""
return self.user_data["full_name"]
@property
    def biography ( self: List[Any] ) -> str:
"""simple docstring"""
return self.user_data["biography"]
@property
    def email ( self: List[Any] ) -> str:
"""simple docstring"""
return self.user_data["business_email"]
@property
    def website ( self: List[Any] ) -> str:
"""simple docstring"""
return self.user_data["external_url"]
@property
    def number_of_followers ( self: List[Any] ) -> int:
"""simple docstring"""
return self.user_data["edge_followed_by"]["count"]
@property
    def number_of_followings ( self: List[str] ) -> int:
"""simple docstring"""
return self.user_data["edge_follow"]["count"]
@property
    def number_of_posts ( self: List[str] ) -> int:
"""simple docstring"""
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
    def profile_picture_url ( self: List[str] ) -> str:
"""simple docstring"""
return self.user_data["profile_pic_url_hd"]
@property
    def is_verified ( self: Optional[int] ) -> bool:
"""simple docstring"""
return self.user_data["is_verified"]
@property
    def is_private ( self: List[str] ) -> bool:
"""simple docstring"""
return self.user_data["is_private"]
def lowerCAmelCase_ ( UpperCamelCase_ = "github" ) -> None:
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
UpperCamelCase_ = InstagramUser(UpperCamelCase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , UpperCamelCase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('github')
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 328 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _UpperCamelCase ( lowerCamelCase__ ):
_UpperCamelCase : List[Any] = ['pixel_values']
def __init__( self: int , _SCREAMING_SNAKE_CASE: Dict = True , _SCREAMING_SNAKE_CASE: Tuple = None , _SCREAMING_SNAKE_CASE: Optional[Any] = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE: int = True , _SCREAMING_SNAKE_CASE: Any = None , _SCREAMING_SNAKE_CASE: List[str] = True , _SCREAMING_SNAKE_CASE: Tuple = 1 / 255 , _SCREAMING_SNAKE_CASE: int = True , _SCREAMING_SNAKE_CASE: int = None , _SCREAMING_SNAKE_CASE: Union[str, Any] = None , _SCREAMING_SNAKE_CASE: Tuple = True , **_SCREAMING_SNAKE_CASE: Union[str, Any] , ) -> List[Any]:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = size if size is not None else {"shortest_edge": 224}
UpperCamelCase_ = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCamelCase_ = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE , param_name="crop_size" )
UpperCamelCase_ = do_resize
UpperCamelCase_ = size
UpperCamelCase_ = resample
UpperCamelCase_ = do_center_crop
UpperCamelCase_ = crop_size
UpperCamelCase_ = do_rescale
UpperCamelCase_ = rescale_factor
UpperCamelCase_ = do_normalize
UpperCamelCase_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCamelCase_ = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCamelCase_ = do_convert_rgb
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Dict = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE: List[str] = None , **_SCREAMING_SNAKE_CASE: Tuple , ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
UpperCamelCase_ = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=size["shortest_edge"] , default_to_square=_SCREAMING_SNAKE_CASE )
return resize(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Tuple = None , **_SCREAMING_SNAKE_CASE: Optional[int] , ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_SCREAMING_SNAKE_CASE , size=(size["height"], size["width"]) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Any = None , **_SCREAMING_SNAKE_CASE: Optional[Any] , ) -> List[Any]:
"""simple docstring"""
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[Any] = None , **_SCREAMING_SNAKE_CASE: int , ) -> int:
"""simple docstring"""
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Dict = None , _SCREAMING_SNAKE_CASE: Dict = None , _SCREAMING_SNAKE_CASE: int = None , _SCREAMING_SNAKE_CASE: Any = None , _SCREAMING_SNAKE_CASE: Optional[int] = None , _SCREAMING_SNAKE_CASE: Dict = None , _SCREAMING_SNAKE_CASE: Optional[Any] = None , _SCREAMING_SNAKE_CASE: int = None , _SCREAMING_SNAKE_CASE: Any = None , _SCREAMING_SNAKE_CASE: int = None , _SCREAMING_SNAKE_CASE: Tuple = None , _SCREAMING_SNAKE_CASE: List[Any] = None , _SCREAMING_SNAKE_CASE: Any = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE: int , ) -> int:
"""simple docstring"""
UpperCamelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase_ = size if size is not None else self.size
UpperCamelCase_ = get_size_dict(_SCREAMING_SNAKE_CASE , param_name="size" , default_to_square=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = resample if resample is not None else self.resample
UpperCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCamelCase_ = get_size_dict(_SCREAMING_SNAKE_CASE , param_name="crop_size" , default_to_square=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase_ = image_std if image_std is not None else self.image_std
UpperCamelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase_ = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase_ = [convert_to_rgb(_SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase_ = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
UpperCamelCase_ = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
UpperCamelCase_ = [self.center_crop(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
UpperCamelCase_ = [self.rescale(image=_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
UpperCamelCase_ = [self.normalize(image=_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE ) for image in images]
UpperCamelCase_ = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
UpperCamelCase_ = {"pixel_values": images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
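        # Usage sketch (the class name CLIPImageProcessor is assumed from the CLIP
        # defaults this module carries; kwargs below are the defaults set above):
        #   from PIL import Image
        #   processor = CLIPImageProcessor()
        #   batch = processor(images=Image.open("cat.png"), return_tensors="np")
        #   batch["pixel_values"].shape  # (1, 3, 224, 224)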
| 356 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_UpperCAmelCase = False
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = 'ybelkada/fonts'
def lowerCAmelCase_ ( ) -> Dict:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
"Pix2StructImageProcessor. Please upgrade torch." )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
requires_backends(UpperCamelCase_ , ["torch"] )
_check_torch_version()
UpperCamelCase_ = image_tensor.unsqueeze(0 )
UpperCamelCase_ = torch.nn.functional.unfold(UpperCamelCase_ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
UpperCamelCase_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCamelCase_ , UpperCamelCase_ , -1 )
UpperCamelCase_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
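# Worked example for torch_extract_patches (illustrative shapes, not from the
# original source): a 3x32x48 channels-first image with 16x16 patches unfolds
# into (32/16)*(48/16) = 6 patches of 3*16*16 = 768 values each, so the
# returned tensor has shape [1, 2, 3, 768].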
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ = 36 , UpperCamelCase_ = "black" , UpperCamelCase_ = "white" , UpperCamelCase_ = 5 , UpperCamelCase_ = 5 , UpperCamelCase_ = 5 , UpperCamelCase_ = 5 , UpperCamelCase_ = None , UpperCamelCase_ = None , ) -> Image.Image:
requires_backends(UpperCamelCase_ , "vision" )
# Add new lines so that each line is no more than 80 characters.
UpperCamelCase_ = textwrap.TextWrapper(width=80 )
UpperCamelCase_ = wrapper.wrap(text=UpperCamelCase_ )
UpperCamelCase_ = "\n".join(UpperCamelCase_ )
if font_bytes is not None and font_path is None:
UpperCamelCase_ = io.BytesIO(UpperCamelCase_ )
elif font_path is not None:
UpperCamelCase_ = font_path
else:
UpperCamelCase_ = hf_hub_download(UpperCamelCase_ , "Arial.TTF" )
UpperCamelCase_ = ImageFont.truetype(UpperCamelCase_ , encoding="UTF-8" , size=UpperCamelCase_ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
UpperCamelCase_ = ImageDraw.Draw(Image.new("RGB" , (1, 1) , UpperCamelCase_ ) )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = temp_draw.textbbox((0, 0) , UpperCamelCase_ , UpperCamelCase_ )
# Create the actual image with a bit of padding around the text.
UpperCamelCase_ = text_width + left_padding + right_padding
UpperCamelCase_ = text_height + top_padding + bottom_padding
UpperCamelCase_ = Image.new("RGB" , (image_width, image_height) , UpperCamelCase_ )
UpperCamelCase_ = ImageDraw.Draw(UpperCamelCase_ )
draw.text(xy=(left_padding, top_padding) , text=UpperCamelCase_ , fill=UpperCamelCase_ , font=UpperCamelCase_ )
return image
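# Example (illustrative): rendering "What is in the picture?" with the default
# 36pt font and 5px padding yields a white RGB canvas just large enough to hold
# the wrapped (at most 80 characters per line) text.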
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) -> Union[str, Any]:
requires_backends(UpperCamelCase_ , "vision" )
# Convert to PIL image if necessary
UpperCamelCase_ = to_pil_image(UpperCamelCase_ )
UpperCamelCase_ = render_text(UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase_ = max(header_image.width , image.width )
UpperCamelCase_ = int(image.height * (new_width / image.width) )
UpperCamelCase_ = int(header_image.height * (new_width / header_image.width) )
UpperCamelCase_ = Image.new("RGB" , (new_width, new_height + new_header_height) , "white" )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
UpperCamelCase_ = to_numpy_array(UpperCamelCase_ )
if infer_channel_dimension_format(UpperCamelCase_ ) == ChannelDimension.LAST:
UpperCamelCase_ = to_channel_dimension_format(UpperCamelCase_ , ChannelDimension.LAST )
return new_image
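# Example (illustrative numbers): for a 640x480 input image and an 800x100
# rendered header, new_width = max(800, 640) = 800, the image is rescaled to
# 800x600 and pasted below the header, giving an 800x700 composite.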
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : str = ['''flattened_patches''']
def __init__( self: List[Any] , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: Dict[str, int] = None , _SCREAMING_SNAKE_CASE: int = 2048 , _SCREAMING_SNAKE_CASE: bool = False , **_SCREAMING_SNAKE_CASE: Optional[Any] , ) -> None:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = patch_size if patch_size is not None else {"height": 16, "width": 16}
UpperCamelCase_ = do_normalize
UpperCamelCase_ = do_convert_rgb
UpperCamelCase_ = max_patches
UpperCamelCase_ = is_vqa
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: dict , **_SCREAMING_SNAKE_CASE: Union[str, Any] ) -> np.ndarray:
"""simple docstring"""
requires_backends(self.extract_flattened_patches , "torch" )
_check_torch_version()
# convert to torch
UpperCamelCase_ = to_channel_dimension_format(_SCREAMING_SNAKE_CASE , ChannelDimension.FIRST )
UpperCamelCase_ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ , UpperCamelCase_ = patch_size["height"], patch_size["width"]
UpperCamelCase_ , UpperCamelCase_ = get_image_size(_SCREAMING_SNAKE_CASE )
        # maximize scale s.t. the resized grid of patches still fits within max_patches
UpperCamelCase_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
UpperCamelCase_ = max(min(math.floor(scale * image_height / patch_height ) , _SCREAMING_SNAKE_CASE ) , 1 )
UpperCamelCase_ = max(min(math.floor(scale * image_width / patch_width ) , _SCREAMING_SNAKE_CASE ) , 1 )
UpperCamelCase_ = max(num_feasible_rows * patch_height , 1 )
UpperCamelCase_ = max(num_feasible_cols * patch_width , 1 )
UpperCamelCase_ = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="bilinear" , align_corners=_SCREAMING_SNAKE_CASE , antialias=_SCREAMING_SNAKE_CASE , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
UpperCamelCase_ = torch_extract_patches(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = patches.shape
UpperCamelCase_ = patches_shape[1]
UpperCamelCase_ = patches_shape[2]
UpperCamelCase_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
UpperCamelCase_ = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
UpperCamelCase_ = torch.arange(_SCREAMING_SNAKE_CASE ).reshape([rows, 1] ).repeat(1 , _SCREAMING_SNAKE_CASE ).reshape([rows * columns, 1] )
UpperCamelCase_ = torch.arange(_SCREAMING_SNAKE_CASE ).reshape([1, columns] ).repeat(_SCREAMING_SNAKE_CASE , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
UpperCamelCase_ = row_ids.to(torch.floataa )
UpperCamelCase_ = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
UpperCamelCase_ = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
UpperCamelCase_ = torch.nn.functional.pad(_SCREAMING_SNAKE_CASE , [0, 0, 0, max_patches - (rows * columns)] ).float()
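        # Example (illustrative): a 2x3 grid of 16x16x3 patches gives 6 real rows
        # of depth 2 + 768 = 770, zero-padded to [max_patches, 770] so every
        # sample in a batch shares one shape.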
UpperCamelCase_ = to_numpy_array(_SCREAMING_SNAKE_CASE )
return result
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE: List[str] ) -> np.ndarray:
"""simple docstring"""
if image.dtype == np.uinta:
UpperCamelCase_ = image.astype(np.floataa )
# take mean across the whole `image`
UpperCamelCase_ = np.mean(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = np.std(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = max(_SCREAMING_SNAKE_CASE , 1.0 / math.sqrt(np.prod(image.shape ) ) )
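        # Example: an all-black 224x224x3 image has std 0, so the 1/sqrt(N) floor
        # (about 0.0026 here) prevents a division by zero in the normalization.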
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: ImageInput , _SCREAMING_SNAKE_CASE: Optional[str] = None , _SCREAMING_SNAKE_CASE: bool = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[int] = None , _SCREAMING_SNAKE_CASE: Optional[Dict[str, int]] = None , _SCREAMING_SNAKE_CASE: Optional[Union[str, TensorType]] = None , _SCREAMING_SNAKE_CASE: ChannelDimension = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE: List[Any] , ) -> ImageInput:
"""simple docstring"""
UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase_ = patch_size if patch_size is not None else self.patch_size
UpperCamelCase_ = max_patches if max_patches is not None else self.max_patches
UpperCamelCase_ = self.is_vqa
if kwargs.get("data_format" , _SCREAMING_SNAKE_CASE ) is not None:
raise ValueError("data_format is not an accepted input as the outputs are " )
UpperCamelCase_ = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase_ = [convert_to_rgb(_SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase_ = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("A header text must be provided for VQA models." )
UpperCamelCase_ = kwargs.pop("font_bytes" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = kwargs.pop("font_path" , _SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = [header_text] * len(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [
render_header(_SCREAMING_SNAKE_CASE , header_text[i] , font_bytes=_SCREAMING_SNAKE_CASE , font_path=_SCREAMING_SNAKE_CASE )
for i, image in enumerate(_SCREAMING_SNAKE_CASE )
]
if do_normalize:
UpperCamelCase_ = [self.normalize(image=_SCREAMING_SNAKE_CASE ) for image in images]
# convert to torch tensor and permute
UpperCamelCase_ = [
self.extract_flattened_patches(image=_SCREAMING_SNAKE_CASE , max_patches=_SCREAMING_SNAKE_CASE , patch_size=_SCREAMING_SNAKE_CASE )
for image in images
]
# create attention mask in numpy
UpperCamelCase_ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
UpperCamelCase_ = BatchFeature(
data={"flattened_patches": images, "attention_mask": attention_masks} , tensor_type=_SCREAMING_SNAKE_CASE )
return encoded_outputs
| 328 | 0 |
from __future__ import annotations
from collections.abc import Callable
_UpperCAmelCase = list[list[float | int]]
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Matrix:
UpperCamelCase_ = len(__A )
UpperCamelCase_ = [[0 for _ in range(size + 1 )] for _ in range(__A )]
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = 42
for row in range(__A ):
for col in range(__A ):
UpperCamelCase_ = matrix[row][col]
UpperCamelCase_ = vector[row][0]
UpperCamelCase_ = 0
UpperCamelCase_ = 0
while row < size and col < size:
# pivoting
        UpperCamelCase_ = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__A , __A ) )[1]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
UpperCamelCase_ , UpperCamelCase_ = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __A ):
UpperCamelCase_ = augmented[rowa][col] / augmented[row][col]
UpperCamelCase_ = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __A ):
for row in range(__A ):
UpperCamelCase_ = augmented[row][col] / augmented[col][col]
for cola in range(__A , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__A )
]
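# Minimal usage sketch (hypothetical 2x2 system): solve([[2, 1], [1, 3]],
# [[5], [10]]) returns [[1.0], [3.0]], i.e. x = 1 and y = 3.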
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Callable[[int], int]:
UpperCamelCase_ = len(__A )
UpperCamelCase_ = [[0 for _ in range(__A )] for _ in range(__A )]
UpperCamelCase_ = [[0] for _ in range(__A )]
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = 42
for x_val, y_val in enumerate(__A ):
for col in range(__A ):
UpperCamelCase_ = (x_val + 1) ** (size - col - 1)
UpperCamelCase_ = y_val
UpperCamelCase_ = solve(__A , __A )
def interpolated_func(UpperCamelCase_ ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__A ) )
return interpolated_func
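# Example: interpolating only the first two terms u(1) = 1 and u(2) = 683 gives
# the line f(n) = 682n - 681, whose first incorrect term (FIT) is f(3) = 1365.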
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCAmelCase_ ( UpperCamelCase_ = question_function , UpperCamelCase_ = 10 ) -> int:
UpperCamelCase_ = [func(__A ) for x_val in range(1 , order + 1 )]
UpperCamelCase_ = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
UpperCamelCase_ = 0
UpperCamelCase_ = 42
UpperCamelCase_ = 42
for poly in polynomials:
UpperCamelCase_ = 1
while func(__A ) == poly(__A ):
x_val += 1
ret += poly(__A )
return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
| 357 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self: Any , _SCREAMING_SNAKE_CASE: int = 768 , ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase_ = nn.Parameter(torch.zeros(1 , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = nn.Parameter(torch.ones(1 , _SCREAMING_SNAKE_CASE ) )
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Union[str, torch.device]] = None , _SCREAMING_SNAKE_CASE: Optional[torch.dtype] = None , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = nn.Parameter(self.mean.to(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = nn.Parameter(self.std.to(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) )
return self
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Dict ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = (embeds * self.std) + self.mean
return embeds
| 328 | 0 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
_UpperCAmelCase = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"] ):
continue
item.add_marker(pytest.mark.unit )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Tuple:
config.addinivalue_line("markers" , "torchaudio_latest: mark test to run with torchaudio>=0.12" )
@pytest.fixture(autouse=lowerCamelCase_ )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
UpperCamelCase_ = tmp_path_factory.getbasetemp() / """cache"""
UpperCamelCase_ = test_hf_cache_home / """datasets"""
UpperCamelCase_ = test_hf_cache_home / """metrics"""
UpperCamelCase_ = test_hf_cache_home / """modules"""
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE" , str(lowerCamelCase_ ) )
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE" , str(lowerCamelCase_ ) )
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE" , str(lowerCamelCase_ ) )
UpperCamelCase_ = test_hf_datasets_cache / """downloads"""
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH" , str(lowerCamelCase_ ) )
UpperCamelCase_ = test_hf_datasets_cache / """downloads""" / """extracted"""
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(lowerCamelCase_ ) )
@pytest.fixture(autouse=lowerCamelCase_ , scope="session" )
def lowerCAmelCase_ ( ) -> Union[str, Any]:
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowerCamelCase_ )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> str:
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS" , lowerCamelCase_ )
@pytest.fixture
def lowerCAmelCase_ ( UpperCamelCase_ ) -> str:
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING" , lowerCamelCase_ )
| 358 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_UpperCAmelCase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_UpperCAmelCase = logging.getLogger()
def lowerCAmelCase_ ( ) -> Optional[int]:
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("-f" )
UpperCamelCase_ = parser.parse_args()
return args.f
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_="eval" ) -> Any:
UpperCamelCase_ = os.path.join(UpperCamelCase_ , F'''{split}_results.json''' )
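    # e.g. for split="eval" this loads "<output_dir>/eval_results.json"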
if os.path.exists(UpperCamelCase_ ):
with open(UpperCamelCase_ , "r" ) as f:
return json.load(UpperCamelCase_ )
raise ValueError(F'''can\'t find {path}''' )
_UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCamelCase ( lowerCAmelCase_ ):
def lowercase ( self: Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_flax_glue.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def lowercase ( self: int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_clm_flax.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
def lowercase ( self: Any ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_summarization_flax.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def lowercase ( self: str ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_mlm_flax.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def lowercase ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_ta_mlm_flax.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def lowercase ( self: str ) -> int:
"""simple docstring"""
UpperCamelCase_ = 7 if get_gpu_count() > 1 else 2
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_flax_ner.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def lowercase ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_qa.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
| 328 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class _UpperCamelCase ( __A ):
_UpperCamelCase : Union[str, Any] = 'bert'
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str]=30522 , _SCREAMING_SNAKE_CASE: Dict=768 , _SCREAMING_SNAKE_CASE: List[str]=12 , _SCREAMING_SNAKE_CASE: Dict=12 , _SCREAMING_SNAKE_CASE: Any=3072 , _SCREAMING_SNAKE_CASE: Union[str, Any]="gelu" , _SCREAMING_SNAKE_CASE: Dict=0.1 , _SCREAMING_SNAKE_CASE: Dict=0.1 , _SCREAMING_SNAKE_CASE: str=512 , _SCREAMING_SNAKE_CASE: List[Any]=2 , _SCREAMING_SNAKE_CASE: int=0.02 , _SCREAMING_SNAKE_CASE: str=1e-12 , _SCREAMING_SNAKE_CASE: List[str]=0 , _SCREAMING_SNAKE_CASE: Dict="absolute" , _SCREAMING_SNAKE_CASE: Dict=True , _SCREAMING_SNAKE_CASE: int=None , **_SCREAMING_SNAKE_CASE: List[Any] , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=__lowercase , **__lowercase )
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = hidden_act
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = position_embedding_type
UpperCamelCase_ = use_cache
UpperCamelCase_ = classifier_dropout
class _UpperCamelCase ( __A ):
@property
def lowercase ( self: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCamelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 359 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
    for param in module.parameters():
        param.requires_grad = False
def lowerCAmelCase_ ( ) -> Dict:
UpperCamelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCamelCase_ = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Union[str, Any]:
UpperCamelCase_ = plt.imshow(UpperCamelCase_ )
fig.axes.get_xaxis().set_visible(UpperCamelCase_ )
fig.axes.get_yaxis().set_visible(UpperCamelCase_ )
plt.show()
def lowerCAmelCase_ ( ) -> List[str]:
UpperCamelCase_ = datetime.now()
UpperCamelCase_ = current_time.strftime("%H:%M:%S" )
return timestamp
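# Illustrative behaviour: on a CUDA machine the device helper returns "cuda";
# on Apple Silicon it returns "mps" and prints the backpropagation warning above.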
| 328 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[Any]=7 , _SCREAMING_SNAKE_CASE: Optional[int]=3 , _SCREAMING_SNAKE_CASE: Union[str, Any]=18 , _SCREAMING_SNAKE_CASE: Tuple=30 , _SCREAMING_SNAKE_CASE: Optional[Any]=400 , _SCREAMING_SNAKE_CASE: List[Any]=True , _SCREAMING_SNAKE_CASE: List[Any]=None , _SCREAMING_SNAKE_CASE: Tuple=True , _SCREAMING_SNAKE_CASE: Tuple=None , _SCREAMING_SNAKE_CASE: Tuple=True , _SCREAMING_SNAKE_CASE: List[Any]=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , _SCREAMING_SNAKE_CASE: Dict=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , _SCREAMING_SNAKE_CASE: Optional[int]=True , ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = size if size is not None else {"height": 224, "width": 224}
UpperCamelCase_ = crop_size if crop_size is not None else {"height": 18, "width": 18}
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = image_size
UpperCamelCase_ = min_resolution
UpperCamelCase_ = max_resolution
UpperCamelCase_ = do_resize
UpperCamelCase_ = size
UpperCamelCase_ = do_center_crop
UpperCamelCase_ = crop_size
UpperCamelCase_ = do_normalize
UpperCamelCase_ = image_mean
UpperCamelCase_ = image_std
UpperCamelCase_ = do_convert_rgb
def lowercase ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int]=False , _SCREAMING_SNAKE_CASE: List[Any]=False , _SCREAMING_SNAKE_CASE: Union[str, Any]=False ) -> Union[str, Any]:
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
UpperCamelCase_ = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
UpperCamelCase_ = []
for i in range(self.batch_size ):
UpperCamelCase_ = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
UpperCamelCase_ = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
if torchify:
UpperCamelCase_ = [torch.from_numpy(snake_case__ ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class _UpperCamelCase ( A_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = ChineseCLIPImageProcessor if is_vision_available() else None
def lowercase ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = ChineseCLIPImageProcessingTester(self , do_center_crop=snake_case__ )
@property
def lowercase ( self: Tuple ) -> int:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase ( self: Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
self.assertTrue(hasattr(snake_case__ , "do_center_crop" ) )
self.assertTrue(hasattr(snake_case__ , "center_crop" ) )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "image_mean" ) )
self.assertTrue(hasattr(snake_case__ , "image_std" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def lowercase ( self: str ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def lowercase ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
def lowercase ( self: List[str] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase ( self: int ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase ( self: Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class _UpperCamelCase ( A_ , unittest.TestCase ):
_UpperCamelCase : int = ChineseCLIPImageProcessor if is_vision_available() else None
def lowercase ( self: Any ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=snake_case__ )
UpperCamelCase_ = 3
@property
def lowercase ( self: str ) -> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase ( self: Dict ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
self.assertTrue(hasattr(snake_case__ , "do_center_crop" ) )
self.assertTrue(hasattr(snake_case__ , "center_crop" ) )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "image_mean" ) )
self.assertTrue(hasattr(snake_case__ , "image_std" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def lowercase ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase ( self: int ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_ = self.image_processor_tester.prepare_inputs(equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCamelCase_ = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 360 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = '▁'
_UpperCAmelCase = {'vocab_file': 'spiece.model'}
_UpperCAmelCase = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
_UpperCAmelCase = {
'google/pegasus-xsum': 5_1_2,
}
_UpperCAmelCase = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase_ ):
    _UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[int] = ['''input_ids''', '''attention_mask''']
def __init__( self: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: str="<pad>" , _SCREAMING_SNAKE_CASE: Optional[Any]="</s>" , _SCREAMING_SNAKE_CASE: Any="<unk>" , _SCREAMING_SNAKE_CASE: int="<mask_2>" , _SCREAMING_SNAKE_CASE: List[Any]="<mask_1>" , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: Optional[int]=103 , _SCREAMING_SNAKE_CASE: Optional[Dict[str, Any]] = None , **_SCREAMING_SNAKE_CASE: Dict , ) -> None:
"""simple docstring"""
UpperCamelCase_ = offset
if additional_special_tokens is not None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError(
f'''additional_special_tokens should be of type {type(_SCREAMING_SNAKE_CASE )}, but is'''
f''' {type(_SCREAMING_SNAKE_CASE )}''' )
UpperCamelCase_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(_SCREAMING_SNAKE_CASE ) , self.offset - 1 )
]
if len(set(_SCREAMING_SNAKE_CASE ) ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
UpperCamelCase_ = additional_special_tokens_extended
else:
UpperCamelCase_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
UpperCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token_sent=_SCREAMING_SNAKE_CASE , offset=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = mask_token_sent
UpperCamelCase_ = vocab_file
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
# add special tokens to encoder dict
UpperCamelCase_ = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
UpperCamelCase_ = {v: k for k, v in self.encoder.items()}
@property
def lowercase ( self: Dict ) -> int:
"""simple docstring"""
return len(self.sp_model ) + self.offset
def lowercase ( self: int ) -> Dict[str, int]:
"""simple docstring"""
UpperCamelCase_ = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.__dict__.copy()
UpperCamelCase_ = None
return state
def __setstate__( self: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase_ = {}
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: str ) -> int:
"""simple docstring"""
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
UpperCamelCase_ = self.sp_model.piece_to_id(_SCREAMING_SNAKE_CASE )
return sp_id + self.offset
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: int ) -> str:
"""simple docstring"""
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
UpperCamelCase_ = self.sp_model.IdToPiece(index - self.offset )
return token
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = []
UpperCamelCase_ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
UpperCamelCase_ = []
else:
current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
return out_string.strip()
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
return 1
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: str ) -> str:
"""simple docstring"""
UpperCamelCase_ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: List , _SCREAMING_SNAKE_CASE: Optional[List] = None , _SCREAMING_SNAKE_CASE: bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(_SCREAMING_SNAKE_CASE )
elif token_ids_a is None:
return self._special_token_mask(_SCREAMING_SNAKE_CASE ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any]=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase_ = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , "wb" ) as fi:
UpperCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 328 | 0 |
import heapq
import sys
import numpy as np
_UpperCAmelCase = tuple[int, int]
class _UpperCamelCase :
def __init__( self: Any ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = []
UpperCamelCase_ = set()
def lowercase ( self: Tuple ) -> str:
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def lowercase ( self: Any ) -> Tuple:
"""simple docstring"""
return len(self.elements ) == 0
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Any ) -> Any:
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_a )
else:
# update
# print("update", item)
UpperCamelCase_ = []
((UpperCamelCase_) , (UpperCamelCase_)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((UpperCamelCase_) , (UpperCamelCase_)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Optional[int] ) -> Tuple:
"""simple docstring"""
if item in self.set:
self.set.remove(_a )
UpperCamelCase_ = []
((UpperCamelCase_) , (UpperCamelCase_)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((UpperCamelCase_) , (UpperCamelCase_)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def lowercase ( self: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.elements[0][1]
def lowercase ( self: Union[str, Any] ) -> Tuple:
"""simple docstring"""
((UpperCamelCase_) , (UpperCamelCase_)) = heapq.heappop(self.elements )
self.set.remove(_a )
return (priority, item)
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
# euclidean distance
UpperCamelCase_ = np.array(lowerCAmelCase__ )
UpperCamelCase_ = np.array(lowerCAmelCase__ )
return np.linalg.norm(a - b )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
# integer division by time variable
return consistent_heuristic(lowerCAmelCase__ , lowerCAmelCase__ ) // t
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Dict:
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
UpperCamelCase_ = g_function[start] + Wa * heuristics[i](lowerCAmelCase__ , lowerCAmelCase__ )
return ans
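# Example: with Wa = 1 and the consistent (euclidean) heuristic, key(s, 0, goal,
# g_function) is g(s) + ||s - goal||_2, i.e. the classic A* f-value.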
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
UpperCamelCase_ = np.chararray((n, n) )
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
UpperCamelCase_ = "*"
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
if (j, (n - 1) - i) in blocks:
UpperCamelCase_ = "#"
UpperCamelCase_ = "-"
UpperCamelCase_ = back_pointer[goal]
while x != start:
((UpperCamelCase_) , (UpperCamelCase_)) = x
# print(x)
UpperCamelCase_ = "-"
UpperCamelCase_ = back_pointer[x]
UpperCamelCase_ = "-"
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
UpperCamelCase_ = back_pointer[goal]
while x != start:
print(lowerCAmelCase__ , end=" " )
UpperCamelCase_ = back_pointer[x]
print(lowerCAmelCase__ )
sys.exit()
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Tuple:
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Tuple:
for itera in range(lowerCAmelCase__ ):
open_list[itera].remove_element(lowerCAmelCase__ )
# print("s", s)
# print("j", j)
((UpperCamelCase_) , (UpperCamelCase_)) = s
UpperCamelCase_ = (x - 1, y)
UpperCamelCase_ = (x + 1, y)
UpperCamelCase_ = (x, y + 1)
UpperCamelCase_ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCAmelCase__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCAmelCase__ )
UpperCamelCase_ = -1
UpperCamelCase_ = float("inf" )
if valid(lowerCAmelCase__ ) and g_function[neighbours] > g_function[s] + 1:
UpperCamelCase_ = g_function[s] + 1
UpperCamelCase_ = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCAmelCase__ , key(lowerCAmelCase__ , 0 , lowerCAmelCase__ , lowerCAmelCase__ ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCAmelCase__ ):
if key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) <= Wa * key(
lowerCAmelCase__ , 0 , lowerCAmelCase__ , lowerCAmelCase__ ):
open_list[j].put(
lowerCAmelCase__ , key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )
def lowerCAmelCase_ ( ) -> List[Any]:
UpperCamelCase_ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
_UpperCAmelCase = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
_UpperCAmelCase = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(1_0, 1),
(1_1, 1),
(1_2, 1),
(1_3, 1),
(1_4, 1),
(1_5, 1),
(1_6, 1),
(1_7, 1),
(1_8, 1),
(1_9, 1),
]
_UpperCAmelCase = make_common_ground()
_UpperCAmelCase = blocks_blk
# hyper parameters
_UpperCAmelCase = 1
_UpperCAmelCase = 1
_UpperCAmelCase = 2_0
_UpperCAmelCase = 3 # one consistent and two other inconsistent
# start and end destination
_UpperCAmelCase = (0, 0)
_UpperCAmelCase = (n - 1, n - 1)
_UpperCAmelCase = 1
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Any:
UpperCamelCase_ = {start: 0, goal: float("inf" )}
UpperCamelCase_ = {start: -1, goal: -1}
UpperCamelCase_ = []
UpperCamelCase_ = set()
for i in range(lowerCAmelCase__ ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCAmelCase__ , key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )
UpperCamelCase_ = []
UpperCamelCase_ = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , lowerCAmelCase__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
UpperCamelCase_ , UpperCamelCase_ = open_list[i].top_show()
visited.add(lowerCAmelCase__ )
expand_state(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
close_list_inad.append(lowerCAmelCase__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
UpperCamelCase_ = open_list[0].top_show()
visited.add(lowerCAmelCase__ )
expand_state(
lowerCAmelCase__ , 0 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
close_list_anchor.append(lowerCAmelCase__ )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCAmelCase__ ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 361 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 328 | 0 |
"""simple docstring"""
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
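    # Bitwise AND computed over the binary string representations, zfill-padded to equal length below.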
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
    UpperCamelCase_ = bin(lowercase__ )[2:] # remove the leading "0b" (bin() already returns a str)
    UpperCamelCase_ = bin(lowercase__ )[2:] # remove the leading "0b"
UpperCamelCase_ = max(len(lowercase__ ) , len(lowercase__ ) )
return "0b" + "".join(
str(int(char_a == "1" and char_b == "1" ) )
for char_a, char_b in zip(a_binary.zfill(lowercase__ ) , b_binary.zfill(lowercase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362 |
import argparse
import json
from tqdm import tqdm
def lowerCAmelCase_ ( ) -> Tuple:
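    # Split DPR biencoder records into a questions file and a tab-separated file of gold passage titles.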
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=UpperCamelCase_ , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=UpperCamelCase_ , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=UpperCamelCase_ , help="where to store parsed gold_data_path file" , )
UpperCamelCase_ = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
UpperCamelCase_ = json.load(UpperCamelCase_ )
for dpr_record in tqdm(UpperCamelCase_ ):
UpperCamelCase_ = dpr_record["question"]
UpperCamelCase_ = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(UpperCamelCase_ ) + "\n" )
if __name__ == "__main__":
main()
| 328 | 0 |
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
UpperCamelCase_ = """"""
for i in table:
res += inp[i - 1]
return res
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Optional[Any]:
return data[1:] + data[0]
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
UpperCamelCase_ = """"""
for i in range(len(_lowerCamelCase ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> int:
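    # The first and last bits of the nibble select the S-box row; the middle two bits select the column.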
UpperCamelCase_ = int("0b" + data[0] + data[-1] , 2 )
UpperCamelCase_ = int("0b" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
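    # One Feistel round: expand/permute the right half, XOR with the round key, substitute, permute, then XOR into the left half.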
UpperCamelCase_ = message[:4]
UpperCamelCase_ = message[4:]
UpperCamelCase_ = apply_table(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_ = xor(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_ = apply_sbox(_lowerCamelCase , temp[:4] ) # noqa: E741
UpperCamelCase_ = apply_sbox(_lowerCamelCase , temp[4:] )
UpperCamelCase_ = """0""" * (2 - len(_lowerCamelCase )) + l # noqa: E741
UpperCamelCase_ = """0""" * (2 - len(_lowerCamelCase )) + r
UpperCamelCase_ = apply_table(l + r , _lowerCamelCase )
UpperCamelCase_ = xor(_lowerCamelCase , _lowerCamelCase )
return temp + right
if __name__ == "__main__":
_UpperCAmelCase = input('Enter 10 bit key: ')
_UpperCAmelCase = input('Enter 8 bit message: ')
_UpperCAmelCase = [6, 3, 7, 4, 8, 5, 1_0, 9]
_UpperCAmelCase = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
_UpperCAmelCase = [2, 4, 3, 1]
_UpperCAmelCase = [2, 6, 3, 1, 4, 8, 5, 7]
_UpperCAmelCase = [4, 1, 3, 5, 7, 2, 8, 6]
_UpperCAmelCase = [4, 1, 2, 3, 2, 3, 4, 1]
_UpperCAmelCase = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_UpperCAmelCase = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_UpperCAmelCase = apply_table(key, paa_table)
_UpperCAmelCase = temp[:5]
_UpperCAmelCase = temp[5:]
_UpperCAmelCase = left_shift(left)
_UpperCAmelCase = left_shift(right)
_UpperCAmelCase = apply_table(left + right, pa_table)
_UpperCAmelCase = left_shift(left)
_UpperCAmelCase = left_shift(right)
_UpperCAmelCase = left_shift(left)
_UpperCAmelCase = left_shift(right)
_UpperCAmelCase = apply_table(left + right, pa_table)
# encryption
_UpperCAmelCase = apply_table(message, IP)
_UpperCAmelCase = function(expansion, sa, sa, keya, temp)
_UpperCAmelCase = temp[4:] + temp[:4]
_UpperCAmelCase = function(expansion, sa, sa, keya, temp)
_UpperCAmelCase = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
_UpperCAmelCase = apply_table(CT, IP)
_UpperCAmelCase = function(expansion, sa, sa, keya, temp)
_UpperCAmelCase = temp[4:] + temp[:4]
_UpperCAmelCase = function(expansion, sa, sa, keya, temp)
_UpperCAmelCase = apply_table(temp, IP_inv)
print('Plain text after decypting is:', PT)
| 363 |
import requests
from bsa import BeautifulSoup
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> str:
UpperCamelCase_ = BeautifulSoup(requests.get(UpperCamelCase_ , params=UpperCamelCase_ ).content , "html.parser" )
UpperCamelCase_ = soup.find("div" , attrs={"class": "gs_ri"} )
UpperCamelCase_ = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_UpperCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 3_0,
'pages': '3979-3990',
'year': 2_0_1_8,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 328 | 0 |
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
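    # Russian peasant multiplication: add a whenever the low bit of b is set, then double a and halve b.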
UpperCamelCase_ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]:
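    # Same double-and-add scheme, with every partial sum reduced modulo c so intermediates stay bounded.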
UpperCamelCase_ = 0
while b > 0:
if b & 1:
UpperCamelCase_ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 364 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self: List[str] , *,
_SCREAMING_SNAKE_CASE: int = 4 , _SCREAMING_SNAKE_CASE: int = 768 , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase_ = nn.Parameter(torch.zeros(_SCREAMING_SNAKE_CASE ) )
# parameters for additional clip time embeddings
UpperCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# parameters for encoder hidden states
UpperCamelCase_ = clip_extra_context_tokens
UpperCamelCase_ = nn.Linear(
_SCREAMING_SNAKE_CASE , self.clip_extra_context_tokens * cross_attention_dim )
UpperCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = nn.LayerNorm(_SCREAMING_SNAKE_CASE )
def lowercase ( self: Optional[int] , *, _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple ) -> str:
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
UpperCamelCase_ = image_embeddings.shape[0]
UpperCamelCase_ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
UpperCamelCase_ = classifier_free_guidance_embeddings.expand(
_SCREAMING_SNAKE_CASE , -1 )
UpperCamelCase_ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
UpperCamelCase_ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
UpperCamelCase_ = self.embedding_proj(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.clip_image_embeddings_project_to_time_embeddings(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
UpperCamelCase_ = self.clip_extra_context_tokens_proj(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = clip_extra_context_tokens.reshape(_SCREAMING_SNAKE_CASE , -1 , self.clip_extra_context_tokens )
UpperCamelCase_ = clip_extra_context_tokens.permute(0 , 2 , 1 )
UpperCamelCase_ = self.encoder_hidden_states_proj(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.text_encoder_hidden_states_norm(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 328 | 0 |
class _UpperCamelCase :
def __init__( self: Optional[Any] ) -> None:
"""simple docstring"""
UpperCamelCase_ = {} # Mapping from char to TrieNode
UpperCamelCase_ = False
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[Any] ) -> None:
"""simple docstring"""
for word in words:
self.insert(_a )
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Any ) -> None:
"""simple docstring"""
UpperCamelCase_ = self
for char in word:
if char not in curr.nodes:
UpperCamelCase_ = TrieNode()
UpperCamelCase_ = curr.nodes[char]
UpperCamelCase_ = True
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: List[str] ) -> bool:
"""simple docstring"""
UpperCamelCase_ = self
for char in word:
if char not in curr.nodes:
return False
UpperCamelCase_ = curr.nodes[char]
return curr.is_leaf
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> None:
"""simple docstring"""
def _delete(_SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Tuple ) -> bool:
if index == len(_a ):
# If word does not exist
if not curr.is_leaf:
return False
UpperCamelCase_ = False
return len(curr.nodes ) == 0
UpperCamelCase_ = word[index]
UpperCamelCase_ = curr.nodes.get(_a )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
UpperCamelCase_ = _delete(_a , _a , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , _a , 0 )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> None:
if node.is_leaf:
print(__a , end=" " )
for key, value in node.nodes.items():
print_words(__a , word + key )
def lowerCAmelCase_ ( ) -> bool:
UpperCamelCase_ = '''banana bananas bandana band apple all beast'''.split()
UpperCamelCase_ = TrieNode()
root.insert_many(__a )
# print_words(root, "")
assert all(root.find(__a ) for word in words )
assert root.find("banana" )
assert not root.find("bandanas" )
assert not root.find("apps" )
assert root.find("apple" )
assert root.find("all" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> None:
print(str(__a ) , "works!" if passes else "doesn\'t work :(" )
def lowerCAmelCase_ ( ) -> None:
assert test_trie()
def lowerCAmelCase_ ( ) -> None:
print_results("Testing trie functionality" , test_trie() )
if __name__ == "__main__":
main()
| 365 |
from functools import lru_cache
def lowerCAmelCase_ ( UpperCamelCase_ ) -> set:
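    # Trial division up to sqrt(n): divide out each factor completely before advancing the candidate.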
UpperCamelCase_ = 2
UpperCamelCase_ = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(UpperCamelCase_ )
if n > 1:
factors.add(UpperCamelCase_ )
return factors
@lru_cache
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
return len(unique_prime_factors(UpperCamelCase_ ) )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> bool:
return len(set(UpperCamelCase_ ) ) in (0, 1)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> list:
UpperCamelCase_ = 2
while True:
# Increment each value of a generated range
UpperCamelCase_ = [base + i for i in range(UpperCamelCase_ )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
UpperCamelCase_ = [upf_len(UpperCamelCase_ ) for x in group]
checker.append(UpperCamelCase_ )
# If all numbers in the list are equal, return the group variable.
if equality(UpperCamelCase_ ):
return group
# Increment our base variable by 1
base += 1
def lowerCAmelCase_ ( UpperCamelCase_ = 4 ) -> int:
UpperCamelCase_ = run(UpperCamelCase_ )
return results[0] if len(UpperCamelCase_ ) else None
if __name__ == "__main__":
print(solution())
| 328 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
_UpperCAmelCase = random.Random()
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_=1.0 , UpperCamelCase_=None , UpperCamelCase_=None ) -> List[str]:
if rng is None:
UpperCamelCase_ = global_rng
UpperCamelCase_ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class _UpperCamelCase ( unittest.TestCase ):
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Dict=7 , _SCREAMING_SNAKE_CASE: int=400 , _SCREAMING_SNAKE_CASE: List[Any]=2000 , _SCREAMING_SNAKE_CASE: List[str]=24 , _SCREAMING_SNAKE_CASE: Tuple=24 , _SCREAMING_SNAKE_CASE: Dict=0.0 , _SCREAMING_SNAKE_CASE: List[Any]=16000 , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: Optional[Any]=True , ) -> int:
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = min_seq_length
UpperCamelCase_ = max_seq_length
UpperCamelCase_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase_ = feature_size
UpperCamelCase_ = num_mel_bins
UpperCamelCase_ = padding_value
UpperCamelCase_ = sampling_rate
UpperCamelCase_ = return_attention_mask
UpperCamelCase_ = do_normalize
def lowercase ( self: int ) -> int:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: List[str]=False , _SCREAMING_SNAKE_CASE: Any=False ) -> int:
"""simple docstring"""
def _flatten(_SCREAMING_SNAKE_CASE: str ):
return list(itertools.chain(*__UpperCAmelCase ) )
if equal_length:
UpperCamelCase_ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase_ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCamelCase_ = [np.asarray(__UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _UpperCamelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase ( self: Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = SpeechaTextFeatureExtractionTester(self )
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: str ) -> str:
"""simple docstring"""
self.assertTrue(np.all(np.mean(__UpperCAmelCase , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(__UpperCAmelCase , axis=0 ) - 1 ) < 1e-3 ) )
def lowercase ( self: List[Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase_ = [np.asarray(__UpperCAmelCase ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase_ = feature_extractor(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase_ = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
UpperCamelCase_ = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
# Test batched
UpperCamelCase_ = feature_extractor(__UpperCAmelCase , return_tensors="np" ).input_features
UpperCamelCase_ = feature_extractor(__UpperCAmelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase_ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase_ = np.asarray(__UpperCAmelCase )
UpperCamelCase_ = feature_extractor(__UpperCAmelCase , return_tensors="np" ).input_features
UpperCamelCase_ = feature_extractor(__UpperCAmelCase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def lowercase ( self: List[str] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase_ = ["""longest""", """max_length""", """do_not_pad"""]
UpperCamelCase_ = [None, 16, None]
for max_length, padding in zip(__UpperCAmelCase , __UpperCAmelCase ):
UpperCamelCase_ = feature_extractor(
__UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase )
UpperCamelCase_ = inputs.input_features
UpperCamelCase_ = inputs.attention_mask
UpperCamelCase_ = [np.sum(__UpperCAmelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase_ = ["""longest""", """max_length""", """do_not_pad"""]
UpperCamelCase_ = [None, 16, None]
for max_length, padding in zip(__UpperCAmelCase , __UpperCAmelCase ):
UpperCamelCase_ = feature_extractor(
__UpperCAmelCase , max_length=__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="np" , return_attention_mask=__UpperCAmelCase )
UpperCamelCase_ = inputs.input_features
UpperCamelCase_ = inputs.attention_mask
UpperCamelCase_ = [np.sum(__UpperCAmelCase ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase ( self: Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase_ = feature_extractor(
__UpperCAmelCase , padding="max_length" , max_length=4 , truncation=__UpperCAmelCase , return_tensors="np" , return_attention_mask=__UpperCAmelCase , )
UpperCamelCase_ = inputs.input_features
UpperCamelCase_ = inputs.attention_mask
UpperCamelCase_ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase ( self: str ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase_ = feature_extractor(
__UpperCAmelCase , padding="longest" , max_length=4 , truncation=__UpperCAmelCase , return_tensors="np" , return_attention_mask=__UpperCAmelCase , )
UpperCamelCase_ = inputs.input_features
UpperCamelCase_ = inputs.attention_mask
UpperCamelCase_ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
UpperCamelCase_ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase_ = feature_extractor(
__UpperCAmelCase , padding="longest" , max_length=16 , truncation=__UpperCAmelCase , return_tensors="np" , return_attention_mask=__UpperCAmelCase , )
UpperCamelCase_ = inputs.input_features
UpperCamelCase_ = inputs.attention_mask
UpperCamelCase_ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def lowercase ( self: Optional[Any] ) -> Any:
"""simple docstring"""
import torch
UpperCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ = np.random.rand(100 , 32 ).astype(np.floataa )
UpperCamelCase_ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase_ = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase_ = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Optional[int] ) -> int:
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase_ = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
UpperCamelCase_ = ds.sort("id" ).select(range(__UpperCAmelCase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def lowercase ( self: Tuple ) -> Tuple:
"""simple docstring"""
        # fmt: off
        UpperCamelCase_ = np.array([
-1.57_45, -1.77_13, -1.70_20, -1.60_69, -1.22_50, -1.11_05, -0.90_72, -0.82_41,
-1.23_10, -0.80_98, -0.33_20, -0.41_01, -0.79_85, -0.49_96, -0.82_13, -0.91_28,
-1.04_20, -1.12_86, -1.04_40, -0.79_99, -0.84_05, -1.22_75, -1.54_43, -1.46_25,
] )
# fmt: on
UpperCamelCase_ = self._load_datasamples(1 )
UpperCamelCase_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase_ = feature_extractor(__UpperCAmelCase , return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , __UpperCAmelCase , atol=1e-4 ) )
| 366 |
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
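    # Gaussian elimination: eliminate below each pivot; rank drops by one for every column without a usable pivot.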
UpperCamelCase_ = len(UpperCamelCase_ )
UpperCamelCase_ = len(matrix[0] )
UpperCamelCase_ = min(UpperCamelCase_ , UpperCamelCase_ )
for row in range(UpperCamelCase_ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , UpperCamelCase_ ):
UpperCamelCase_ = matrix[col][row] / matrix[row][row]
for i in range(UpperCamelCase_ , UpperCamelCase_ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
UpperCamelCase_ = True
for i in range(row + 1 , UpperCamelCase_ ):
if matrix[i][row] != 0:
UpperCamelCase_ , UpperCamelCase_ = matrix[i], matrix[row]
UpperCamelCase_ = False
break
if reduce:
rank -= 1
for i in range(UpperCamelCase_ ):
UpperCamelCase_ = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 328 | 0 |
import os
from collections.abc import Iterator
def lowerCAmelCase_ ( UpperCamelCase_ = "." ) -> Tuple:
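    # Walk the tree, skipping the scripts folder and hidden/underscore-prefixed directories, yielding .py/.ipynb paths.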
for dir_path, dir_names, filenames in os.walk(a_ ):
UpperCamelCase_ = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(a_ )[1] in (".py", ".ipynb"):
yield os.path.join(a_ , a_ ).lstrip("./" )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Tuple:
return F'''{i * " "}*''' if i else "\n##"
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> str:
UpperCamelCase_ = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(a_ ) or old_parts[i] != new_part) and new_part:
print(F'''{md_prefix(a_ )} {new_part.replace("_" , " " ).title()}''' )
return new_path
def lowerCAmelCase_ ( UpperCamelCase_ = "." ) -> List[Any]:
UpperCamelCase_ = ""
for filepath in sorted(good_file_paths(a_ ) ):
UpperCamelCase_ = os.path.split(a_ )
if filepath != old_path:
UpperCamelCase_ = print_path(a_ , a_ )
UpperCamelCase_ = (filepath.count(os.sep ) + 1) if filepath else 0
UpperCamelCase_ = F'''{filepath}/{filename}'''.replace(" " , "%20" )
UpperCamelCase_ = os.path.splitext(filename.replace("_" , " " ).title() )[0]
print(F'''{md_prefix(a_ )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md('.')
| 367 |
import math
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
return y * math.logaa(UpperCamelCase_ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
_UpperCAmelCase = 'Enter the base and the power separated by a comma: '
_UpperCAmelCase , _UpperCAmelCase = map(int, input(prompt).split(','))
_UpperCAmelCase , _UpperCAmelCase = map(int, input(prompt).split(','))
# We find the log of each number, using the function res(), which takes two
# arguments.
_UpperCAmelCase = res(xa, ya)
_UpperCAmelCase = res(xa, ya)
# We check for the largest number
if resa > resa:
print('Largest number is', xa, '^', ya)
elif resa > resa:
print('Largest number is', xa, '^', ya)
else:
print('Both are equal')
| 328 | 0 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , ) -> List[Any]:
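    # Pair a question encoder with a generator into one RAG checkpoint, then save the model and both tokenizers.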
if config_name_or_path is None:
UpperCamelCase_ = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
UpperCamelCase_ = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
UpperCamelCase_ = question_encoder_name_or_path
UpperCamelCase_ = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
UpperCamelCase_ = RagConfig.from_pretrained(_UpperCamelCase )
UpperCamelCase_ = AutoConfig.from_pretrained(_UpperCamelCase )
UpperCamelCase_ = AutoConfig.from_pretrained(_UpperCamelCase )
UpperCamelCase_ = gen_config
UpperCamelCase_ = question_encoder_config
UpperCamelCase_ = model_class.from_pretrained_question_encoder_generator(
_UpperCamelCase , _UpperCamelCase , config=_UpperCamelCase )
rag_model.save_pretrained(_UpperCamelCase )
# Sanity check.
model_class.from_pretrained(_UpperCamelCase )
# Save tokenizers.
UpperCamelCase_ = AutoTokenizer.from_pretrained(_UpperCamelCase )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
UpperCamelCase_ = AutoTokenizer.from_pretrained(_UpperCamelCase )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 368 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
_UpperCAmelCase = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> List[Any]:
if isinstance(UpperCamelCase_ , torch.Tensor ):
return image
elif isinstance(UpperCamelCase_ , PIL.Image.Image ):
UpperCamelCase_ = [image]
UpperCamelCase_ = [trans(img.convert("RGB" ) ) for img in image]
UpperCamelCase_ = torch.stack(UpperCamelCase_ )
return image
class _UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self: List[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Dict ) -> str:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCamelCase_ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Dict ) -> Optional[Any]:
"""simple docstring"""
if strength < 0 or strength > 1:
            raise ValueError(f'''The value of strength should be in [0.0, 1.0] but is {strength}''' )
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] ) -> int:
"""simple docstring"""
UpperCamelCase_ = min(int(num_inference_steps * strength ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int]=None ) -> List[Any]:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_SCREAMING_SNAKE_CASE )}''' )
UpperCamelCase_ = image.to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(_SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCamelCase_ = init_latents.shape
UpperCamelCase_ = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
# get latents
print("add noise to latents at timestep" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.scheduler.add_noise(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = init_latents
return latents
@torch.no_grad()
def __call__( self: Dict , _SCREAMING_SNAKE_CASE: Union[torch.FloatTensor, PIL.Image.Image] = None , _SCREAMING_SNAKE_CASE: float = 0.8 , _SCREAMING_SNAKE_CASE: int = 1 , _SCREAMING_SNAKE_CASE: Optional[Union[torch.Generator, List[torch.Generator]]] = None , _SCREAMING_SNAKE_CASE: float = 0.0 , _SCREAMING_SNAKE_CASE: int = 50 , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[str] = "pil" , _SCREAMING_SNAKE_CASE: bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
self.check_inputs(_SCREAMING_SNAKE_CASE )
# 2. Preprocess image
UpperCamelCase_ = preprocess(_SCREAMING_SNAKE_CASE )
# 3. set timesteps
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=self.device )
UpperCamelCase_ , UpperCamelCase_ = self.get_timesteps(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.device )
UpperCamelCase_ = timesteps[:1].repeat(_SCREAMING_SNAKE_CASE )
# 4. Prepare latent variables
UpperCamelCase_ = self.prepare_latents(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.unet.dtype , self.device , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = latents
# 5. Denoising loop
for t in self.progress_bar(_SCREAMING_SNAKE_CASE ):
# 1. predict noise model_output
UpperCamelCase_ = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCamelCase_ = self.scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , use_clipped_model_output=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample
UpperCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase_ = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
| 328 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
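    # Explicit Euler: y[k + 1] = y[k] + h * f(x[k], y[k]) on a uniform grid with step h = step_size.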
UpperCamelCase_ = int(np.ceil((x_end - xa) / step_size ) )
UpperCamelCase_ = np.zeros((n + 1,) )
UpperCamelCase_ = ya
UpperCamelCase_ = xa
for k in range(a__ ):
UpperCamelCase_ = y[k] + step_size * ode_func(a__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 |
import re
from filelock import FileLock
try:
import nltk
_UpperCAmelCase = True
except (ImportError, ModuleNotFoundError):
_UpperCAmelCase = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> str:
re.sub("<n>" , "" , UpperCamelCase_ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCamelCase_ ) )
| 328 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_UpperCAmelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_UpperCAmelCase = {
'vocab_file': {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
},
'tokenizer_file': {
'unc-nlp/lxmert-base-uncased': (
'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_UpperCAmelCase = {
'unc-nlp/lxmert-base-uncased': 5_1_2,
}
_UpperCAmelCase = {
'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class _UpperCamelCase ( __UpperCamelCase ):
_UpperCamelCase : List[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[str] = LxmertTokenizer
def __init__( self: int , _SCREAMING_SNAKE_CASE: List[Any]=None , _SCREAMING_SNAKE_CASE: List[Any]=None , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: Optional[int]="[UNK]" , _SCREAMING_SNAKE_CASE: str="[SEP]" , _SCREAMING_SNAKE_CASE: List[str]="[PAD]" , _SCREAMING_SNAKE_CASE: Optional[Any]="[CLS]" , _SCREAMING_SNAKE_CASE: List[str]="[MASK]" , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: Any=None , **_SCREAMING_SNAKE_CASE: Optional[int] , ) -> Dict:
"""simple docstring"""
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
UpperCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , _lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , _lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , _lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCamelCase_ = getattr(_lowerCAmelCase , normalizer_state.pop("type" ) )
UpperCamelCase_ = do_lower_case
UpperCamelCase_ = strip_accents
UpperCamelCase_ = tokenize_chinese_chars
UpperCamelCase_ = normalizer_class(**_lowerCAmelCase )
UpperCamelCase_ = do_lower_case
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Any=None ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: List[int] , _SCREAMING_SNAKE_CASE: Optional[List[int]] = None ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] = None ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
return tuple(_lowerCAmelCase )
| 370 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DiTPipeline
_UpperCamelCase : Any = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Dict = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_UpperCamelCase : Optional[int] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Dict = False
def lowercase ( self: str ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase_ = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = AutoencoderKL()
UpperCamelCase_ = DDIMScheduler()
UpperCamelCase_ = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[str]=0 ) -> Dict:
"""simple docstring"""
if str(_SCREAMING_SNAKE_CASE ).startswith("mps" ):
UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowercase ( self: Any ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = "cpu"
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = pipe(**_SCREAMING_SNAKE_CASE ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
UpperCamelCase_ = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
UpperCamelCase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 )
def lowercase ( self: Optional[int] ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase ( self: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class _UpperCamelCase ( unittest.TestCase ):
def lowercase ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
UpperCamelCase_ = ["vase", "umbrella", "white shark", "white wolf"]
UpperCamelCase_ = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def lowercase ( self: int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
UpperCamelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
UpperCamelCase_ = ["vase", "umbrella"]
UpperCamelCase_ = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
| 328 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class _UpperCamelCase ( _UpperCAmelCase ):
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Union[str, Any]=13 , _SCREAMING_SNAKE_CASE: List[str]=7 , _SCREAMING_SNAKE_CASE: Any=True , _SCREAMING_SNAKE_CASE: Union[str, Any]=True , _SCREAMING_SNAKE_CASE: Optional[int]=False , _SCREAMING_SNAKE_CASE: List[str]=True , _SCREAMING_SNAKE_CASE: int=99 , _SCREAMING_SNAKE_CASE: Any=32 , _SCREAMING_SNAKE_CASE: List[str]=5 , _SCREAMING_SNAKE_CASE: List[str]=4 , _SCREAMING_SNAKE_CASE: Optional[int]=64 , _SCREAMING_SNAKE_CASE: Tuple="gelu" , _SCREAMING_SNAKE_CASE: Dict=0.1 , _SCREAMING_SNAKE_CASE: int=0.1 , _SCREAMING_SNAKE_CASE: Union[str, Any]=512 , _SCREAMING_SNAKE_CASE: List[Any]=16 , _SCREAMING_SNAKE_CASE: List[Any]=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=0.02 , _SCREAMING_SNAKE_CASE: Any=3 , _SCREAMING_SNAKE_CASE: List[Any]=4 , _SCREAMING_SNAKE_CASE: List[str]=None , _SCREAMING_SNAKE_CASE: str=2 , _SCREAMING_SNAKE_CASE: Tuple=2 , _SCREAMING_SNAKE_CASE: Optional[int]=2 , _SCREAMING_SNAKE_CASE: Tuple=2 , _SCREAMING_SNAKE_CASE: Optional[Any]=4 , _SCREAMING_SNAKE_CASE: str=1 , ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = num_choices
UpperCamelCase_ = scope
UpperCamelCase_ = q_groups
UpperCamelCase_ = k_groups
UpperCamelCase_ = v_groups
UpperCamelCase_ = post_attention_groups
UpperCamelCase_ = intermediate_groups
UpperCamelCase_ = output_groups
def lowercase ( self: str ) -> int:
"""simple docstring"""
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self: Optional[int] ) -> List[Any]:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = SqueezeBertModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase_ = model(lowercase_ , lowercase_ )
UpperCamelCase_ = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Optional[int] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = SqueezeBertForMaskedLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = SqueezeBertForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase_ = model(
lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: int ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = SqueezeBertForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = SqueezeBertForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase_ = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: int ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.num_choices
UpperCamelCase_ = SqueezeBertForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ = model(
lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self: int ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.prepare_config_and_inputs()
(UpperCamelCase_) = config_and_inputs
UpperCamelCase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Dict = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
_UpperCamelCase : Optional[Any] = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : List[str] = False
def lowercase ( self: List[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = SqueezeBertModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=lowercase_ , dim=37 )
def lowercase ( self: Optional[int] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase ( self: Optional[Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowercase_ )
def lowercase ( self: List[str] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowercase_ )
def lowercase ( self: Optional[Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowercase_ )
def lowercase ( self: Dict ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowercase_ )
def lowercase ( self: Dict ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowercase_ )
def lowercase ( self: Any ) -> str:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowercase_ )
@slow
def lowercase ( self: Dict ) -> List[Any]:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = SqueezeBertModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
UpperCamelCase_ = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
UpperCamelCase_ = model(lowercase_ )[0]
UpperCamelCase_ = torch.Size((1, 3) )
self.assertEqual(output.shape , lowercase_ )
UpperCamelCase_ = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] )
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
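# --- Added note (not part of the original snippet) ---
# The integration test above follows the usual checkpoint-pinning pattern: run a
# fixed input through the pretrained model, compare a small slice of the logits
# against hard-coded expected values, and allow atol=1e-4 of floating point
# noise across hardware and backends.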
| 371 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _UpperCamelCase :
def __init__( self: str ) -> Any:
"""simple docstring"""
UpperCamelCase_ = ""
UpperCamelCase_ = ""
UpperCamelCase_ = []
UpperCamelCase_ = 0
UpperCamelCase_ = 256
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = 0
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Dict ) -> str:
"""simple docstring"""
UpperCamelCase_ = cva.imread(_SCREAMING_SNAKE_CASE , 0 )
UpperCamelCase_ = copy.deepcopy(self.img )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
UpperCamelCase_ = np.sum(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCamelCase_ = x[i] / self.k
self.sk += prk
UpperCamelCase_ = (self.L - 1) * self.sk
            UpperCamelCase_ = last % 1  # assumed intent: fractional part of `last` for half-up rounding (upstream guarded this behind `self.rem != 0` and computed `last % last`, which is always 0)
            UpperCamelCase_ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = int(np.ma.count(self.img ) / self.img[1].size )
UpperCamelCase_ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCamelCase_ = self.img[j][i]
if num != self.last_list[num]:
UpperCamelCase_ = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def lowercase ( self: Any ) -> Optional[Any]:
"""simple docstring"""
plt.hist(self.img.ravel() , 256 , [0, 256] )
def lowercase ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
    _UpperCAmelCase = os.path.join(os.path.dirname(__file__), 'image_data/input.jpg')  # dirname, not basename: resolve relative to this file's directory
_UpperCAmelCase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
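# --- Added sketch (not part of the original snippet) ---
# The class above builds the classic histogram-equalization mapping
# s_k = (L - 1) * sum_{j<=k} p(r_j). A minimal vectorized equivalent,
# assuming an 8-bit grayscale numpy array as input:
def _equalize_demo(img, levels=256):
    hist = np.bincount(img.ravel(), minlength=levels)  # raw gray-level counts
    cdf = np.cumsum(hist) / hist.sum()  # cumulative distribution over levels 0..k
    lut = np.rint((levels - 1) * cdf).astype(img.dtype)  # s_k lookup table
    return lut[img]  # remap every pixel through the table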
| 328 | 0 |
from __future__ import annotations
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
if len(__a ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(__a )
or left < -len(__a )
or right >= len(__a )
or right < -len(__a )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
UpperCamelCase_ = (left + right) >> 1 # the middle
UpperCamelCase_ = find_max(__a , __a , __a ) # find max in range[left, mid]
UpperCamelCase_ = find_max(__a , mid + 1 , __a ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
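# --- Added usage sketch (not part of the original snippet) ---
# The function implements divide-and-conquer max: split [left, right] at the
# midpoint and keep the larger of the two recursive results, e.g. for
# nums = [2, 8, 1, 7] and (left, right) = (0, 3):
#   mid = 1 -> max(find_max(nums, 0, 1), find_max(nums, 2, 3)) -> max(8, 7) -> 8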
| 350 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_UpperCAmelCase = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_UpperCAmelCase = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_UpperCAmelCase = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
return float((preds == labels).mean() )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="binary" ) -> Tuple:
UpperCamelCase_ = simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase_ = float(fa_score(y_true=UpperCamelCase_ , y_pred=UpperCamelCase_ , average=UpperCamelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
UpperCamelCase_ = {}
for id_pred, label in zip(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase_ = F'''{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'''
UpperCamelCase_ = id_pred["prediction"]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
UpperCamelCase_ = [(pred, label)]
UpperCamelCase_ , UpperCamelCase_ = [], []
for question, preds_labels in question_map.items():
UpperCamelCase_ , UpperCamelCase_ = zip(*UpperCamelCase_ )
UpperCamelCase_ = fa_score(y_true=UpperCamelCase_ , y_pred=UpperCamelCase_ , average="macro" )
fas.append(UpperCamelCase_ )
UpperCamelCase_ = int(sum(pred == label for pred, label in preds_labels ) == len(UpperCamelCase_ ) )
ems.append(UpperCamelCase_ )
UpperCamelCase_ = float(sum(UpperCamelCase_ ) / len(UpperCamelCase_ ) )
UpperCamelCase_ = sum(UpperCamelCase_ ) / len(UpperCamelCase_ )
UpperCamelCase_ = float(fa_score(y_true=UpperCamelCase_ , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
def lowercase ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
def lowercase ( self: List[Any] ) -> int:
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] ) -> Dict:
"""simple docstring"""
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
elif self.config_name == "cb":
return acc_and_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , fa_avg="macro" )
elif self.config_name == "record":
UpperCamelCase_ = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
UpperCamelCase_ = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 328 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
UpperCamelCase_ = FileLock(str(tmpdir / "foo.lock" ) )
UpperCamelCase_ = FileLock(str(tmpdir / "foo.lock" ) )
UpperCamelCase_ = 0.01
with locka.acquire():
with pytest.raises(__lowerCamelCase ):
UpperCamelCase_ = time.time()
locka.acquire(__lowerCamelCase )
assert time.time() - _start > timeout
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Optional[int]:
UpperCamelCase_ = "a" * 1000 + ".lock"
UpperCamelCase_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(__lowerCamelCase )
assert len(os.path.basename(locka._lock_file ) ) <= 255
UpperCamelCase_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__lowerCamelCase ):
locka.acquire(0 )
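# --- Added usage sketch (not part of the original snippet) ---
# Typical application code for the same FileLock API exercised above:
#
#   lock = FileLock("resource.txt.lock")
#   with lock.acquire(timeout=1):  # raises Timeout if another process holds it
#       ...  # exclusive access to resource.txt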
| 351 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : str = '''mgp-str'''
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int]=[32, 128] , _SCREAMING_SNAKE_CASE: Tuple=4 , _SCREAMING_SNAKE_CASE: Optional[Any]=3 , _SCREAMING_SNAKE_CASE: Optional[int]=27 , _SCREAMING_SNAKE_CASE: Tuple=38 , _SCREAMING_SNAKE_CASE: Tuple=50257 , _SCREAMING_SNAKE_CASE: List[Any]=30522 , _SCREAMING_SNAKE_CASE: Optional[Any]=768 , _SCREAMING_SNAKE_CASE: Dict=12 , _SCREAMING_SNAKE_CASE: List[str]=12 , _SCREAMING_SNAKE_CASE: Dict=4.0 , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: Tuple=False , _SCREAMING_SNAKE_CASE: Tuple=1e-5 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.0 , _SCREAMING_SNAKE_CASE: Tuple=0.0 , _SCREAMING_SNAKE_CASE: List[Any]=0.0 , _SCREAMING_SNAKE_CASE: List[str]=False , _SCREAMING_SNAKE_CASE: int=0.02 , **_SCREAMING_SNAKE_CASE: Any , ) -> str:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = max_token_length
UpperCamelCase_ = num_character_labels
UpperCamelCase_ = num_bpe_labels
UpperCamelCase_ = num_wordpiece_labels
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = mlp_ratio
UpperCamelCase_ = distilled
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = drop_rate
UpperCamelCase_ = qkv_bias
UpperCamelCase_ = attn_drop_rate
UpperCamelCase_ = drop_path_rate
UpperCamelCase_ = output_aa_attentions
UpperCamelCase_ = initializer_range
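# --- Added usage sketch (not part of the original snippet; names assumed) ---
# Like other PretrainedConfig subclasses, the class above is typically either
# built with keyword overrides or loaded from the Hub, e.g.:
#
#   config = MgpstrConfig(max_token_length=27, hidden_size=768)
#   config = MgpstrConfig.from_pretrained("alibaba-damo/mgp-str-base")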
| 328 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class _UpperCamelCase ( __snake_case ):
_UpperCamelCase : Tuple = '''canine'''
def __init__( self: str , _SCREAMING_SNAKE_CASE: List[str]=768 , _SCREAMING_SNAKE_CASE: List[Any]=12 , _SCREAMING_SNAKE_CASE: int=12 , _SCREAMING_SNAKE_CASE: Optional[Any]=3072 , _SCREAMING_SNAKE_CASE: Any="gelu" , _SCREAMING_SNAKE_CASE: Tuple=0.1 , _SCREAMING_SNAKE_CASE: str=0.1 , _SCREAMING_SNAKE_CASE: str=16384 , _SCREAMING_SNAKE_CASE: Union[str, Any]=16 , _SCREAMING_SNAKE_CASE: Dict=0.02 , _SCREAMING_SNAKE_CASE: str=1e-12 , _SCREAMING_SNAKE_CASE: List[Any]=0 , _SCREAMING_SNAKE_CASE: List[str]=0XE000 , _SCREAMING_SNAKE_CASE: Union[str, Any]=0XE001 , _SCREAMING_SNAKE_CASE: Optional[int]=4 , _SCREAMING_SNAKE_CASE: List[Any]=4 , _SCREAMING_SNAKE_CASE: str=8 , _SCREAMING_SNAKE_CASE: Union[str, Any]=16384 , _SCREAMING_SNAKE_CASE: Optional[Any]=128 , **_SCREAMING_SNAKE_CASE: List[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = initializer_range
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = layer_norm_eps
# Character config:
UpperCamelCase_ = downsampling_rate
UpperCamelCase_ = upsampling_kernel_size
UpperCamelCase_ = num_hash_functions
UpperCamelCase_ = num_hash_buckets
UpperCamelCase_ = local_transformer_stride
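# --- Added note (not part of the original snippet) ---
# The character-level fields above drive CANINE's hash embedding: each Unicode
# codepoint is mapped through num_hash_functions (8) hash functions into
# num_hash_buckets (16384) buckets, the bucket embeddings are combined into the
# character embedding, and downsampling_rate (4) controls how many characters
# are pooled into a single position for the deep transformer stack.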
| 352 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
_UpperCamelCase : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_UpperCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_UpperCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_UpperCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    _UpperCamelCase : bool = field(default=lowerCAmelCase_ , metadata={'''help''': '''Whether to freeze the encoder.'''} )
_UpperCamelCase : bool = field(default=lowerCAmelCase_ , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class _UpperCamelCase :
_UpperCamelCase : str = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
_UpperCamelCase : Optional[str] = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
_UpperCamelCase : Optional[int] = field(
default=1_0_2_4 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCamelCase : Optional[int] = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCamelCase : Optional[int] = field(
default=1_4_2 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
_UpperCamelCase : Optional[int] = field(
default=1_4_2 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
_UpperCamelCase : Optional[str] = field(default=lowerCAmelCase_ , metadata={'''help''': '''Source language id for translation.'''} )
_UpperCamelCase : Optional[str] = field(default=lowerCAmelCase_ , metadata={'''help''': '''Target language id for translation.'''} )
_UpperCamelCase : Optional[int] = field(default=lowerCAmelCase_ , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
_UpperCamelCase : bool = field(
default=lowerCAmelCase_ , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
logger.info(F'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(F''' {key} = {metrics[key]}''' )
save_json(UpperCamelCase_ , os.path.join(UpperCamelCase_ , F'''{split}_results.json''' ) )
def lowerCAmelCase_ ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses()
check_output_dir(UpperCamelCase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , UpperCamelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase_ = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
assert hasattr(UpperCamelCase_ , UpperCamelCase_ ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(UpperCamelCase_ , UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
UpperCamelCase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(UpperCamelCase_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
UpperCamelCase_ = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(UpperCamelCase_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase_ = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
UpperCamelCase_ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(UpperCamelCase_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
UpperCamelCase_ = SeqaSeqDataset
# Get datasets
UpperCamelCase_ = (
dataset_class(
UpperCamelCase_ , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
UpperCamelCase_ = (
dataset_class(
UpperCamelCase_ , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
UpperCamelCase_ = (
dataset_class(
UpperCamelCase_ , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
UpperCamelCase_ = (
build_compute_metrics_fn(data_args.task , UpperCamelCase_ ) if training_args.predict_with_generate else None
)
UpperCamelCase_ = SeqaSeqTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , data_args=UpperCamelCase_ , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , data_collator=SeqaSeqDataCollator(
UpperCamelCase_ , UpperCamelCase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
UpperCamelCase_ = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
UpperCamelCase_ = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
UpperCamelCase_ = train_result.metrics
UpperCamelCase_ = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , UpperCamelCase_ , training_args.output_dir )
all_metrics.update(UpperCamelCase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCamelCase_ = trainer.evaluate(metric_key_prefix="val" )
UpperCamelCase_ = data_args.n_val
UpperCamelCase_ = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , UpperCamelCase_ , training_args.output_dir )
all_metrics.update(UpperCamelCase_ )
if training_args.do_predict:
logger.info("*** Predict ***" )
UpperCamelCase_ = trainer.predict(test_dataset=UpperCamelCase_ , metric_key_prefix="test" )
UpperCamelCase_ = test_output.metrics
UpperCamelCase_ = data_args.n_test
if trainer.is_world_process_zero():
UpperCamelCase_ = round(metrics["test_loss"] , 4 )
handle_metrics("test" , UpperCamelCase_ , training_args.output_dir )
all_metrics.update(UpperCamelCase_ )
if training_args.predict_with_generate:
UpperCamelCase_ = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
UpperCamelCase_ = lmap(str.strip , UpperCamelCase_ )
write_txt_file(UpperCamelCase_ , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(UpperCamelCase_ , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Optional[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
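# --- Added usage sketch (assumption-labeled; not part of the original script) ---
# A script structured like this is driven entirely by CLI flags parsed into the
# three dataclasses above, e.g. (illustrative invocation, paths assumed):
#
#   python finetune_trainer.py \
#       --model_name_or_path t5-small --data_dir ./data \
#       --output_dir ./out --do_train --do_eval --predict_with_generate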
| 328 | 0 |
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Optional[Any]:
if len(_A ) < 2:
return collection
def circle_sort_util(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> bool:
UpperCamelCase_ = False
if low == high:
return swapped
UpperCamelCase_ = low
UpperCamelCase_ = high
while left < right:
if collection[left] > collection[right]:
UpperCamelCase_ = (
collection[right],
collection[left],
)
UpperCamelCase_ = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
UpperCamelCase_ = (
collection[right + 1],
collection[left],
)
UpperCamelCase_ = True
UpperCamelCase_ = low + int((high - low) / 2 )
UpperCamelCase_ = circle_sort_util(_A , _A , _A )
UpperCamelCase_ = circle_sort_util(_A , mid + 1 , _A )
return swapped or left_swap or right_swap
UpperCamelCase_ = True
while is_not_sorted is True:
UpperCamelCase_ = circle_sort_util(_A , 0 , len(_A ) - 1 )
return collection
if __name__ == "__main__":
_UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
_UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted))
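# --- Added reference sketch (not part of the original snippet) ---
# A readable, self-contained restatement of the same circle-sort idea: swap the
# pair (low + i, high - i) moving inward, recurse on both halves, and repeat
# whole passes until one completes without a swap.
def _circle_sort_demo(a: list) -> list:
    def one_pass(lo: int, hi: int) -> bool:
        if lo >= hi:
            return False
        swapped = False
        left, right = lo, hi
        while left < right:
            if a[left] > a[right]:
                a[left], a[right] = a[right], a[left]
                swapped = True
            left += 1
            right -= 1
        if left == right and a[left] > a[right + 1]:  # middle pair of an odd-length range
            a[left], a[right + 1] = a[right + 1], a[left]
            swapped = True
        mid = lo + (hi - lo) // 2
        left_half = one_pass(lo, mid)
        right_half = one_pass(mid + 1, hi)
        return swapped or left_half or right_half
    while one_pass(0, len(a) - 1):
        pass
    return a
# _circle_sort_demo([5, 3, 8, 1, 9, 2])  # -> [1, 2, 3, 5, 8, 9]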
| 353 |
def lowerCAmelCase_ ( UpperCamelCase_ ) -> list:
UpperCamelCase_ = int(UpperCamelCase_ )
if n_element < 1:
UpperCamelCase_ = ValueError("a should be a positive number" )
raise my_error
UpperCamelCase_ = [1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = (0, 0, 0)
UpperCamelCase_ = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
_UpperCAmelCase = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
_UpperCAmelCase = hamming(int(n))
print('-----------------------------------------------------')
print(f'''The list with nth numbers is: {hamming_numbers}''')
print('-----------------------------------------------------')
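# --- Added note (not part of the original snippet) ---
# The three pointers i, j, k track the next candidates 2*h[i], 3*h[j], 5*h[k];
# appending the smallest of the three keeps the list sorted without duplicates.
# The first ten Hamming (5-smooth) numbers are: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12.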
| 328 | 0 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
_UpperCAmelCase = 6_3_7_8_1_3_7.0
_UpperCAmelCase = 6_3_5_6_7_5_2.3_1_4_2_4_5
_UpperCAmelCase = 6_3_7_8_1_3_7
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> float:
UpperCamelCase_ = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
UpperCamelCase_ = atan((1 - flattening) * tan(radians(UpperCamelCase_ ) ) )
UpperCamelCase_ = atan((1 - flattening) * tan(radians(UpperCamelCase_ ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
UpperCamelCase_ = haversine_distance(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
UpperCamelCase_ = (b_lata + b_lata) / 2
UpperCamelCase_ = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
UpperCamelCase_ = (sin(UpperCamelCase_ ) ** 2) * (cos(UpperCamelCase_ ) ** 2)
UpperCamelCase_ = cos(sigma / 2 ) ** 2
UpperCamelCase_ = (sigma - sin(UpperCamelCase_ )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
UpperCamelCase_ = (cos(UpperCamelCase_ ) ** 2) * (sin(UpperCamelCase_ ) ** 2)
UpperCamelCase_ = sin(sigma / 2 ) ** 2
UpperCamelCase_ = (sigma + sin(UpperCamelCase_ )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
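# --- Added note (not part of the original snippet) ---
# Summary of the computation above (Lambert's ellipsoidal distance):
#   beta_i = atan((1 - f) * tan(phi_i))          # parametric latitudes
#   sigma  = haversine(p1, p2) / R_equatorial    # central angle
#   P, Q   = (beta1 + beta2) / 2, (beta2 - beta1) / 2
#   X = (sigma - sin sigma) * sin^2 P cos^2 Q / cos^2(sigma / 2)
#   Y = (sigma + sin sigma) * cos^2 P sin^2 Q / sin^2(sigma / 2)
#   distance = R_equatorial * (sigma - (f / 2) * (X + Y))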
| 354 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = IFImgaImgSuperResolutionPipeline
_UpperCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
_UpperCamelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
_UpperCamelCase : List[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowercase ( self: List[str] ) -> Any:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Optional[int]=0 ) -> List[Any]:
"""simple docstring"""
if str(_SCREAMING_SNAKE_CASE ).startswith("mps" ):
UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase ( self: Any ) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowercase ( self: int ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def lowercase ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase ( self: Dict ) -> Any:
"""simple docstring"""
self._test_save_load_local()
def lowercase ( self: Any ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
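# --- Added note (not part of the original snippet) ---
# get_dummy_inputs above mirrors how this pipeline is called for real: a prompt
# plus a low-resolution `image` and the `original_image` it was derived from,
# with a seeded generator so the 2-step run is reproducible across tests.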
| 328 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class _UpperCamelCase :
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: str=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=True , _SCREAMING_SNAKE_CASE: Dict=False , _SCREAMING_SNAKE_CASE: Any=10 , _SCREAMING_SNAKE_CASE: Tuple=3 , _SCREAMING_SNAKE_CASE: str=32 * 8 , _SCREAMING_SNAKE_CASE: Optional[int]=32 * 8 , _SCREAMING_SNAKE_CASE: Optional[int]=4 , _SCREAMING_SNAKE_CASE: Optional[int]=64 , ) -> int:
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = is_training
UpperCamelCase_ = use_auxiliary_loss
UpperCamelCase_ = num_queries
UpperCamelCase_ = num_channels
UpperCamelCase_ = min_size
UpperCamelCase_ = max_size
UpperCamelCase_ = num_labels
UpperCamelCase_ = hidden_dim
UpperCamelCase_ = hidden_dim
def lowercase ( self: Optional[int] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE_ ) > 0.5
).float()
UpperCamelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE_ ) > 0.5).long()
UpperCamelCase_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase ( self: int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
UpperCamelCase_ = self.num_queries
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = [1, 1, 1, 1]
UpperCamelCase_ = self.num_channels
UpperCamelCase_ = 64
UpperCamelCase_ = 128
UpperCamelCase_ = self.hidden_dim
UpperCamelCase_ = self.hidden_dim
UpperCamelCase_ = self.hidden_dim
return config
def lowercase ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = output.encoder_hidden_states
UpperCamelCase_ = output.pixel_decoder_hidden_states
UpperCamelCase_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ) , config.decoder_layers )
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: List[str]=False ) -> List[str]:
"""simple docstring"""
with torch.no_grad():
UpperCamelCase_ = MaskaFormerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase_ = model(pixel_values=SCREAMING_SNAKE_CASE_ , pixel_mask=SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = model(SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Optional[int] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
def comm_check_on_output(_SCREAMING_SNAKE_CASE: Optional[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCamelCase_ = model(pixel_values=SCREAMING_SNAKE_CASE_ , pixel_mask=SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = model(
pixel_values=SCREAMING_SNAKE_CASE_ , pixel_mask=SCREAMING_SNAKE_CASE_ , mask_labels=SCREAMING_SNAKE_CASE_ , class_labels=SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _UpperCamelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : int = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_UpperCamelCase : List[Any] = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Any = False
_UpperCamelCase : int = False
_UpperCamelCase : Any = False
def lowercase ( self: Dict ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = MaskaFormerModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ )
def lowercase ( self: int ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase ( self: Union[str, Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ )
def lowercase ( self: Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowercase ( self: Dict ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowercase ( self: Tuple ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowercase ( self: Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowercase ( self: List[Any] ) -> Dict:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowercase ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def lowercase ( self: Any ) -> str:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
@slow
def lowercase ( self: str ) -> Any:
"""simple docstring"""
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCamelCase_ = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def lowercase ( self: Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = (self.model_tester.min_size,) * 2
UpperCamelCase_ = {
"""pixel_values""": torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE_ ),
"""mask_labels""": torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE_ ),
"""class_labels""": torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE_ ).long(),
}
UpperCamelCase_ = self.model_tester.get_config()
UpperCamelCase_ = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
def lowercase ( self: Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ )
def lowercase ( self: Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = model(**SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.attentions is not None )
def lowercase ( self: str ) -> str:
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCamelCase_ = self.all_model_classes[1]
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase_ = model(SCREAMING_SNAKE_CASE_ , mask_labels=SCREAMING_SNAKE_CASE_ , class_labels=SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def lowercase ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.all_model_classes[1]
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase_ = True
UpperCamelCase_ = True
UpperCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCamelCase_ = model(SCREAMING_SNAKE_CASE_ , mask_labels=SCREAMING_SNAKE_CASE_ , class_labels=SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_UpperCAmelCase = 1e-4
def lowerCAmelCase_ ( ) -> Tuple:
UpperCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class _UpperCamelCase ( unittest.TestCase ):
@cached_property
def lowercase ( self: Optional[int] ) -> str:
"""simple docstring"""
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase ( self: int ) -> int:
"""simple docstring"""
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_ , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase_ = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase_ = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
def lowercase ( self: List[Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_ , (1, 3, 384, 384) )
with torch.no_grad():
UpperCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
# masks_queries_logits
UpperCamelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCamelCase_ = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
UpperCamelCase_ = torch.tensor(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
# class_queries_logits
UpperCamelCase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCamelCase_ = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=SCREAMING_SNAKE_CASE_ ) )
def lowercase ( self: int ) -> str:
"""simple docstring"""
UpperCamelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
UpperCamelCase_ = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase_ = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs["""mask_labels"""]]
UpperCamelCase_ = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
UpperCamelCase_ = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
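# --- Added note (not part of the original snippet) ---
# The retain_grad() pattern used in the training test above is the standard way
# to assert that gradients flow to intermediate activations, since non-leaf
# tensors drop their .grad by default:
#
#   h = outputs.encoder_hidden_states[0]
#   h.retain_grad()
#   loss.backward(retain_graph=True)
#   assert h.grad is not None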
| 355 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_UpperCAmelCase = {'UserAgent': UserAgent().random}
def lowerCAmelCase_ ( UpperCamelCase_ ) -> dict:
UpperCamelCase_ = script.contents[0]
UpperCamelCase_ = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class _UpperCamelCase :
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: str ) -> str:
"""simple docstring"""
UpperCamelCase_ = f'''https://www.instagram.com/{username}/'''
UpperCamelCase_ = self.get_json()
def lowercase ( self: Union[str, Any] ) -> dict:
"""simple docstring"""
UpperCamelCase_ = requests.get(self.url , headers=_SCREAMING_SNAKE_CASE ).text
UpperCamelCase_ = BeautifulSoup(_SCREAMING_SNAKE_CASE , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self: Tuple ) -> str:
"""simple docstring"""
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self: List[Any] ) -> str:
"""simple docstring"""
return f'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def lowercase ( self: List[str] ) -> str:
"""simple docstring"""
return self.user_data["username"]
@property
def lowercase ( self: int ) -> str:
"""simple docstring"""
return self.user_data["full_name"]
@property
def lowercase ( self: List[Any] ) -> str:
"""simple docstring"""
return self.user_data["biography"]
@property
def lowercase ( self: List[Any] ) -> str:
"""simple docstring"""
return self.user_data["business_email"]
@property
def lowercase ( self: List[Any] ) -> str:
"""simple docstring"""
return self.user_data["external_url"]
@property
def lowercase ( self: List[Any] ) -> int:
"""simple docstring"""
return self.user_data["edge_followed_by"]["count"]
@property
def lowercase ( self: List[str] ) -> int:
"""simple docstring"""
return self.user_data["edge_follow"]["count"]
@property
def lowercase ( self: List[str] ) -> int:
"""simple docstring"""
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def lowercase ( self: List[str] ) -> str:
"""simple docstring"""
return self.user_data["profile_pic_url_hd"]
@property
def lowercase ( self: Optional[int] ) -> bool:
"""simple docstring"""
return self.user_data["is_verified"]
@property
def lowercase ( self: List[str] ) -> bool:
"""simple docstring"""
return self.user_data["is_private"]
def lowerCAmelCase_ ( UpperCamelCase_ = "github" ) -> None:
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
UpperCamelCase_ = InstagramUser(UpperCamelCase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , UpperCamelCase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = InstagramUser('github')
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 328 | 0 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _UpperCamelCase ( __UpperCamelCase ):
def lowercase ( self: List[str] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "num_attention_heads" ) )
class _UpperCamelCase :
def __init__( self: int , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: int=13 , _SCREAMING_SNAKE_CASE: str=64 , _SCREAMING_SNAKE_CASE: Any=3 , _SCREAMING_SNAKE_CASE: List[Any]=3 , _SCREAMING_SNAKE_CASE: Tuple=2 , _SCREAMING_SNAKE_CASE: Union[str, Any]=1 , _SCREAMING_SNAKE_CASE: Union[str, Any]=16 , _SCREAMING_SNAKE_CASE: Dict=[128, 256, 384] , _SCREAMING_SNAKE_CASE: Optional[int]=[4, 6, 8] , _SCREAMING_SNAKE_CASE: Dict=[2, 3, 4] , _SCREAMING_SNAKE_CASE: Optional[Any]=[16, 16, 16] , _SCREAMING_SNAKE_CASE: Tuple=0 , _SCREAMING_SNAKE_CASE: str=[2, 2, 2] , _SCREAMING_SNAKE_CASE: Optional[Any]=[2, 2, 2] , _SCREAMING_SNAKE_CASE: Tuple=0.02 , _SCREAMING_SNAKE_CASE: Any=True , _SCREAMING_SNAKE_CASE: Union[str, Any]=True , _SCREAMING_SNAKE_CASE: Any=2 , ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = kernel_size
UpperCamelCase_ = stride
UpperCamelCase_ = padding
UpperCamelCase_ = hidden_sizes
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = depths
UpperCamelCase_ = key_dim
UpperCamelCase_ = drop_path_rate
UpperCamelCase_ = patch_size
UpperCamelCase_ = attention_ratio
UpperCamelCase_ = mlp_ratio
UpperCamelCase_ = initializer_range
UpperCamelCase_ = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = num_labels
UpperCamelCase_ = initializer_range
def lowercase ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels
def lowercase ( self: Any ) -> int:
"""simple docstring"""
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = LevitModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = (self.image_size, self.image_size)
UpperCamelCase_ = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase_ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
UpperCamelCase_ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Any ) -> str:
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = LevitForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self: int ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
_UpperCamelCase : List[str] = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
_UpperCamelCase : List[str] = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_UpperCamelCase : Dict = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Union[str, Any] = False
def lowercase ( self: Dict ) -> str:
"""simple docstring"""
UpperCamelCase_ = LevitModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def lowercase ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self: Tuple ) -> List[Any]:
"""simple docstring"""
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def lowercase ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def lowercase ( self: Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="Levit does not output attentions" )
def lowercase ( self: Dict ) -> Optional[Any]:
"""simple docstring"""
pass
def lowercase ( self: Union[str, Any] ) -> str:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def lowercase ( self: List[str] ) -> int:
"""simple docstring"""
def check_hidden_states_output(_SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: str ):
UpperCamelCase_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCamelCase_ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = outputs.hidden_states
UpperCamelCase_ = len(self.model_tester.depths ) + 1
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = (self.model_tester.image_size, self.model_tester.image_size)
UpperCamelCase_ = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase_ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
UpperCamelCase_ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase ( self: List[Any] ) -> List[str]:
"""simple docstring"""
pass
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[int]=False ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowercase ( self: Dict ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def lowercase ( self: Any ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
def lowercase ( self: str ) -> List[Any]:
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_SCREAMING_SNAKE_CASE )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
UpperCamelCase_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def lowercase ( self: List[str] ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase_ = False
UpperCamelCase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCamelCase_ = model_class(_SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(_SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = model(**_SCREAMING_SNAKE_CASE ).loss
loss.backward()
def lowercase ( self: Any ) -> str:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_SCREAMING_SNAKE_CASE ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ):
UpperCamelCase_ = problem_type["""title"""]
UpperCamelCase_ = problem_type["""num_labels"""]
UpperCamelCase_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.train()
UpperCamelCase_ = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if problem_type["num_labels"] > 1:
UpperCamelCase_ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
UpperCamelCase_ = inputs["""labels"""].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_SCREAMING_SNAKE_CASE ) as warning_list:
UpperCamelCase_ = model(**_SCREAMING_SNAKE_CASE ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def lowercase ( self: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = LevitModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( ) -> Union[str, Any]:
UpperCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
@cached_property
def lowercase ( self: Any ) -> Tuple:
"""simple docstring"""
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase ( self: Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
UpperCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.tensor([1.04_48, -0.37_45, -1.83_17] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 356 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_UpperCAmelCase = False
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = 'ybelkada/fonts'
def lowerCAmelCase_ ( ) -> Dict:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
"Pix2StructImageProcessor. Please upgrade torch." )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
requires_backends(UpperCamelCase_ , ["torch"] )
_check_torch_version()
UpperCamelCase_ = image_tensor.unsqueeze(0 )
UpperCamelCase_ = torch.nn.functional.unfold(UpperCamelCase_ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
UpperCamelCase_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCamelCase_ , UpperCamelCase_ , -1 )
UpperCamelCase_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
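# Hedged shape sketch for torch_extract_patches above (comment-only; assumes the
# obfuscated assignments resolve to the conventional local names):
#     x = torch.rand(3, 64, 48)                # (channels, height, width)
#     out = torch_extract_patches(x, 16, 16)
#     out.shape == (1, 4, 3, 16 * 16 * 3)      # (1, rows, columns, ph * pw * channels)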
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ = 36 , UpperCamelCase_ = "black" , UpperCamelCase_ = "white" , UpperCamelCase_ = 5 , UpperCamelCase_ = 5 , UpperCamelCase_ = 5 , UpperCamelCase_ = 5 , UpperCamelCase_ = None , UpperCamelCase_ = None , ) -> Image.Image:
requires_backends(UpperCamelCase_ , "vision" )
# Add new lines so that each line is no more than 80 characters.
UpperCamelCase_ = textwrap.TextWrapper(width=80 )
UpperCamelCase_ = wrapper.wrap(text=UpperCamelCase_ )
UpperCamelCase_ = "\n".join(UpperCamelCase_ )
if font_bytes is not None and font_path is None:
UpperCamelCase_ = io.BytesIO(UpperCamelCase_ )
elif font_path is not None:
UpperCamelCase_ = font_path
else:
UpperCamelCase_ = hf_hub_download(UpperCamelCase_ , "Arial.TTF" )
UpperCamelCase_ = ImageFont.truetype(UpperCamelCase_ , encoding="UTF-8" , size=UpperCamelCase_ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
UpperCamelCase_ = ImageDraw.Draw(Image.new("RGB" , (1, 1) , UpperCamelCase_ ) )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = temp_draw.textbbox((0, 0) , UpperCamelCase_ , UpperCamelCase_ )
# Create the actual image with a bit of padding around the text.
UpperCamelCase_ = text_width + left_padding + right_padding
UpperCamelCase_ = text_height + top_padding + bottom_padding
UpperCamelCase_ = Image.new("RGB" , (image_width, image_height) , UpperCamelCase_ )
UpperCamelCase_ = ImageDraw.Draw(UpperCamelCase_ )
draw.text(xy=(left_padding, top_padding) , text=UpperCamelCase_ , fill=UpperCamelCase_ , font=UpperCamelCase_ )
return image
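# Hedged usage sketch for render_text (comment-only; assumes PIL is installed and
# that, absent font_bytes/font_path, Arial.TTF is fetched from the hub repo above):
#     img = render_text("What is shown in this image?")
#     # -> PIL.Image.Image of the 80-column-wrapped text with 5px padding per side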
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) -> Union[str, Any]:
requires_backends(UpperCamelCase_ , "vision" )
# Convert to PIL image if necessary
UpperCamelCase_ = to_pil_image(UpperCamelCase_ )
UpperCamelCase_ = render_text(UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase_ = max(header_image.width , image.width )
UpperCamelCase_ = int(image.height * (new_width / image.width) )
UpperCamelCase_ = int(header_image.height * (new_width / header_image.width) )
UpperCamelCase_ = Image.new("RGB" , (new_width, new_height + new_header_height) , "white" )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
UpperCamelCase_ = to_numpy_array(UpperCamelCase_ )
if infer_channel_dimension_format(UpperCamelCase_ ) == ChannelDimension.LAST:
UpperCamelCase_ = to_channel_dimension_format(UpperCamelCase_ , ChannelDimension.LAST )
return new_image
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : str = ['''flattened_patches''']
def __init__( self: List[Any] , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: Dict[str, int] = None , _SCREAMING_SNAKE_CASE: int = 2048 , _SCREAMING_SNAKE_CASE: bool = False , **_SCREAMING_SNAKE_CASE: Optional[Any] , ) -> None:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = patch_size if patch_size is not None else {"height": 16, "width": 16}
UpperCamelCase_ = do_normalize
UpperCamelCase_ = do_convert_rgb
UpperCamelCase_ = max_patches
UpperCamelCase_ = is_vqa
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: dict , **_SCREAMING_SNAKE_CASE: Union[str, Any] ) -> np.ndarray:
"""simple docstring"""
requires_backends(self.extract_flattened_patches , "torch" )
_check_torch_version()
# convert to torch
UpperCamelCase_ = to_channel_dimension_format(_SCREAMING_SNAKE_CASE , ChannelDimension.FIRST )
UpperCamelCase_ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ , UpperCamelCase_ = patch_size["height"], patch_size["width"]
UpperCamelCase_ , UpperCamelCase_ = get_image_size(_SCREAMING_SNAKE_CASE )
# maximize scale s.t. the number of resulting patches (rows * columns) stays <= max_patches
UpperCamelCase_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
UpperCamelCase_ = max(min(math.floor(scale * image_height / patch_height ) , _SCREAMING_SNAKE_CASE ) , 1 )
UpperCamelCase_ = max(min(math.floor(scale * image_width / patch_width ) , _SCREAMING_SNAKE_CASE ) , 1 )
UpperCamelCase_ = max(num_feasible_rows * patch_height , 1 )
UpperCamelCase_ = max(num_feasible_cols * patch_width , 1 )
UpperCamelCase_ = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="bilinear" , align_corners=_SCREAMING_SNAKE_CASE , antialias=_SCREAMING_SNAKE_CASE , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
UpperCamelCase_ = torch_extract_patches(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = patches.shape
UpperCamelCase_ = patches_shape[1]
UpperCamelCase_ = patches_shape[2]
UpperCamelCase_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
UpperCamelCase_ = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
UpperCamelCase_ = torch.arange(_SCREAMING_SNAKE_CASE ).reshape([rows, 1] ).repeat(1 , _SCREAMING_SNAKE_CASE ).reshape([rows * columns, 1] )
UpperCamelCase_ = torch.arange(_SCREAMING_SNAKE_CASE ).reshape([1, columns] ).repeat(_SCREAMING_SNAKE_CASE , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
UpperCamelCase_ = row_ids.to(torch.floataa )
UpperCamelCase_ = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
UpperCamelCase_ = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
UpperCamelCase_ = torch.nn.functional.pad(_SCREAMING_SNAKE_CASE , [0, 0, 0, max_patches - (rows * columns)] ).float()
UpperCamelCase_ = to_numpy_array(_SCREAMING_SNAKE_CASE )
return result
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE: List[str] ) -> np.ndarray:
"""simple docstring"""
if image.dtype == np.uinta:
UpperCamelCase_ = image.astype(np.floataa )
# take mean across the whole `image`
UpperCamelCase_ = np.mean(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = np.std(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = max(_SCREAMING_SNAKE_CASE , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: ImageInput , _SCREAMING_SNAKE_CASE: Optional[str] = None , _SCREAMING_SNAKE_CASE: bool = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[int] = None , _SCREAMING_SNAKE_CASE: Optional[Dict[str, int]] = None , _SCREAMING_SNAKE_CASE: Optional[Union[str, TensorType]] = None , _SCREAMING_SNAKE_CASE: ChannelDimension = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE: List[Any] , ) -> ImageInput:
"""simple docstring"""
UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase_ = patch_size if patch_size is not None else self.patch_size
UpperCamelCase_ = max_patches if max_patches is not None else self.max_patches
UpperCamelCase_ = self.is_vqa
if kwargs.get("data_format" , _SCREAMING_SNAKE_CASE ) is not None:
raise ValueError("data_format is not an accepted input as the outputs are " )
UpperCamelCase_ = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase_ = [convert_to_rgb(_SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase_ = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("A header text must be provided for VQA models." )
UpperCamelCase_ = kwargs.pop("font_bytes" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = kwargs.pop("font_path" , _SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = [header_text] * len(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [
render_header(_SCREAMING_SNAKE_CASE , header_text[i] , font_bytes=_SCREAMING_SNAKE_CASE , font_path=_SCREAMING_SNAKE_CASE )
for i, image in enumerate(_SCREAMING_SNAKE_CASE )
]
if do_normalize:
UpperCamelCase_ = [self.normalize(image=_SCREAMING_SNAKE_CASE ) for image in images]
# convert to torch tensor and permute
UpperCamelCase_ = [
self.extract_flattened_patches(image=_SCREAMING_SNAKE_CASE , max_patches=_SCREAMING_SNAKE_CASE , patch_size=_SCREAMING_SNAKE_CASE )
for image in images
]
# create attention mask in numpy
UpperCamelCase_ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
UpperCamelCase_ = BatchFeature(
data={"flattened_patches": images, "attention_mask": attention_masks} , tensor_type=_SCREAMING_SNAKE_CASE )
return encoded_outputs
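# Hedged usage sketch (comment-only; assumes this class is transformers'
# Pix2StructImageProcessor with the default 16x16 patch size set above):
#     processor = Pix2StructImageProcessor(max_patches=2048)
#     feats = processor(images=pil_image, return_tensors="pt")
#     feats["flattened_patches"].shape == (1, 2048, 2 + 16 * 16 * 3)  # row/col ids + pixels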
| 328 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_=None ) -> Any:
UpperCamelCase_ = None
if token is not None:
UpperCamelCase_ = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
UpperCamelCase_ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
UpperCamelCase_ = requests.get(A__ , headers=A__ ).json()
UpperCamelCase_ = {}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
UpperCamelCase_ = math.ceil((result["total_count"] - 100) / 100 )
for i in range(A__ ):
UpperCamelCase_ = requests.get(url + F'''&page={i + 2}''' , headers=A__ ).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return job_links
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_=None ) -> List[Any]:
UpperCamelCase_ = None
if token is not None:
UpperCamelCase_ = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
UpperCamelCase_ = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'''
UpperCamelCase_ = requests.get(A__ , headers=A__ ).json()
UpperCamelCase_ = {}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
UpperCamelCase_ = math.ceil((result["total_count"] - 100) / 100 )
for i in range(A__ ):
UpperCamelCase_ = requests.get(url + F'''&page={i + 2}''' , headers=A__ ).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} )
return artifacts
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]:
UpperCamelCase_ = None
if token is not None:
UpperCamelCase_ = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
UpperCamelCase_ = requests.get(A__ , headers=A__ , allow_redirects=A__ )
UpperCamelCase_ = result.headers["Location"]
UpperCamelCase_ = requests.get(A__ , allow_redirects=A__ )
UpperCamelCase_ = os.path.join(A__ , F'''{artifact_name}.zip''' )
with open(A__ , "wb" ) as fp:
fp.write(response.content )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_=None ) -> Dict:
UpperCamelCase_ = []
UpperCamelCase_ = []
UpperCamelCase_ = None
with zipfile.ZipFile(A__ ) as z:
for filename in z.namelist():
if not os.path.isdir(A__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(A__ ) as f:
for line in f:
UpperCamelCase_ = line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
UpperCamelCase_ = line[: line.index(": " )]
UpperCamelCase_ = line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
UpperCamelCase_ = line[len("FAILED " ) :]
failed_tests.append(A__ )
elif filename == "job_name.txt":
UpperCamelCase_ = line
if len(A__ ) != len(A__ ):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(A__ )} for `errors` '''
F'''and {len(A__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
" problem." )
UpperCamelCase_ = None
if job_name and job_links:
UpperCamelCase_ = job_links.get(A__ , A__ )
# A list with elements of the form (line of error, error, failed test)
UpperCamelCase_ = [x + [y] + [job_link] for x, y in zip(A__ , A__ )]
return result
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_=None ) -> int:
UpperCamelCase_ = []
UpperCamelCase_ = [os.path.join(A__ , A__ ) for p in os.listdir(A__ ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(A__ , job_links=A__ ) )
return errors
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_=None ) -> List[Any]:
UpperCamelCase_ = Counter()
counter.update([x[1] for x in logs] )
UpperCamelCase_ = counter.most_common()
UpperCamelCase_ = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
UpperCamelCase_ = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
UpperCamelCase_ = dict(sorted(r.items() , key=lambda UpperCamelCase_ : item[1]["count"] , reverse=A__ ) )
return r
def lowerCAmelCase_ ( UpperCamelCase_ ) -> List[str]:
UpperCamelCase_ = test.split("::" )[0]
if test.startswith("tests/models/" ):
UpperCamelCase_ = test.split("/" )[2]
else:
UpperCamelCase_ = None
return test
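# Comment-only examples of the mapping implemented by get_model above:
#     "tests/models/albert/test_modeling_albert.py::AlbertModelTester::test_config" -> "albert"
#     "tests/test_configuration_common.py::ConfigTester::test_common" -> None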
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_=None ) -> List[Any]:
UpperCamelCase_ = [(x[0], x[1], get_model(x[2] )) for x in logs]
UpperCamelCase_ = [x for x in logs if x[2] is not None]
UpperCamelCase_ = {x[2] for x in logs}
UpperCamelCase_ = {}
for test in tests:
UpperCamelCase_ = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
UpperCamelCase_ = counter.most_common()
UpperCamelCase_ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
UpperCamelCase_ = sum(error_counts.values() )
if n_errors > 0:
UpperCamelCase_ = {"count": n_errors, "errors": error_counts}
UpperCamelCase_ = dict(sorted(r.items() , key=lambda UpperCamelCase_ : item[1]["count"] , reverse=A__ ) )
return r
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Optional[Any]:
UpperCamelCase_ = "| no. | error | status |"
UpperCamelCase_ = "|-:|:-|:-|"
UpperCamelCase_ = [header, sep]
for error in reduced_by_error:
UpperCamelCase_ = reduced_by_error[error]["count"]
UpperCamelCase_ = F'''| {count} | {error[:100]} | |'''
lines.append(A__ )
return "\n".join(A__ )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Any:
UpperCamelCase_ = "| model | no. of errors | major error | count |"
UpperCamelCase_ = "|-:|-:|-:|-:|"
UpperCamelCase_ = [header, sep]
for model in reduced_by_model:
UpperCamelCase_ = reduced_by_model[model]["count"]
UpperCamelCase_ , UpperCamelCase_ = list(reduced_by_model[model]["errors"].items() )[0]
UpperCamelCase_ = F'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(A__ )
return "\n".join(A__ )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
_UpperCAmelCase = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_UpperCAmelCase = get_job_links(args.workflow_run_id, token=args.token)
_UpperCAmelCase = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_UpperCAmelCase = k.find(' / ')
_UpperCAmelCase = k[index + len(' / ') :]
_UpperCAmelCase = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_UpperCAmelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_UpperCAmelCase = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_UpperCAmelCase = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_UpperCAmelCase = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_UpperCAmelCase = reduce_by_error(errors)
_UpperCAmelCase = reduce_by_model(errors)
_UpperCAmelCase = make_github_table(reduced_by_error)
_UpperCAmelCase = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 357 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self: Any , _SCREAMING_SNAKE_CASE: int = 768 , ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase_ = nn.Parameter(torch.zeros(1 , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = nn.Parameter(torch.ones(1 , _SCREAMING_SNAKE_CASE ) )
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Union[str, torch.device]] = None , _SCREAMING_SNAKE_CASE: Optional[torch.dtype] = None , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = nn.Parameter(self.mean.to(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = nn.Parameter(self.std.to(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) )
return self
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Dict ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = (embeds * self.std) + self.mean
return embeds
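# Hedged round-trip sketch (comment-only; assumes the two methods above map to
# diffusers' StableUnCLIPImageNormalizer.scale/unscale with default mean=0, std=1):
#     normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
#     z = torch.randn(2, 768)
#     torch.allclose(normalizer.unscale(normalizer.scale(z)), z)  # True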
| 328 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
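# Comment-only note: _LazyModule defers the framework-specific imports declared in
# _import_structure until first attribute access, e.g. (assuming the usual
# transformers package layout):
#     from transformers.models.roformer import RoFormerModel  # torch loaded only now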
| 358 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_UpperCAmelCase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_UpperCAmelCase = logging.getLogger()
def lowerCAmelCase_ ( ) -> Optional[int]:
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("-f" )
UpperCamelCase_ = parser.parse_args()
return args.f
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_="eval" ) -> Any:
UpperCamelCase_ = os.path.join(UpperCamelCase_ , F'''{split}_results.json''' )
if os.path.exists(UpperCamelCase_ ):
with open(UpperCamelCase_ , "r" ) as f:
return json.load(UpperCamelCase_ )
raise ValueError(F'''can\'t find {path}''' )
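# Comment-only example for the helper above (referenced below as get_results),
# which reads {output_dir}/{split}_results.json written by each example script:
#     get_results(tmp_dir) -> e.g. {"eval_accuracy": 0.8, ...}
#     get_results(tmp_dir, "test") -> contents of test_results.json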
_UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCamelCase ( lowerCAmelCase_ ):
def lowercase ( self: Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_flax_glue.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def lowercase ( self: int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_clm_flax.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
def lowercase ( self: Any ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_summarization_flax.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def lowercase ( self: str ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_mlm_flax.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def lowercase ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_ta_mlm_flax.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def lowercase ( self: str ) -> int:
"""simple docstring"""
UpperCamelCase_ = 7 if get_gpu_count() > 1 else 2
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_flax_ner.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def lowercase ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_qa.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
| 328 | 0 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
_UpperCAmelCase = imread(r'digital_image_processing/image_data/lena_small.jpg')
_UpperCAmelCase = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase_ ( ) -> Union[str, Any]:
UpperCamelCase_ = cn.convert_to_negative(_UpperCamelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCAmelCase_ ( ) -> List[Any]:
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_UpperCamelCase , 110 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def lowerCAmelCase_ ( ) -> List[Any]:
UpperCamelCase_ = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCAmelCase_ ( ) -> Any:
UpperCamelCase_ = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
UpperCamelCase_ = canny.canny(_UpperCamelCase )
# assert canny array for at least one True
assert canny_array.any()
def lowerCAmelCase_ ( ) -> Any:
assert gg.gaussian_filter(_UpperCamelCase , 5 , sigma=0.9 ).all()
def lowerCAmelCase_ ( ) -> int:
UpperCamelCase_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
UpperCamelCase_ = conv.img_convolve(_UpperCamelCase , _UpperCamelCase ).astype(_UpperCamelCase )
assert res.any()
def lowerCAmelCase_ ( ) -> List[Any]:
assert med.median_filter(_UpperCamelCase , 3 ).any()
def lowerCAmelCase_ ( ) -> Optional[Any]:
UpperCamelCase_ , UpperCamelCase_ = sob.sobel_filter(_UpperCamelCase )
assert grad.any() and theta.any()
def lowerCAmelCase_ ( ) -> Dict:
UpperCamelCase_ = sp.make_sepia(_UpperCamelCase , 20 )
assert sepia.all()
def lowerCAmelCase_ ( UpperCamelCase_ = "digital_image_processing/image_data/lena_small.jpg" ) -> str:
UpperCamelCase_ = bs.Burkes(imread(_UpperCamelCase , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase_ ( UpperCamelCase_ = "digital_image_processing/image_data/lena_small.jpg" , ) -> Dict:
UpperCamelCase_ = rs.NearestNeighbour(imread(_UpperCamelCase , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCAmelCase_ ( ) -> Optional[Any]:
UpperCamelCase_ = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
UpperCamelCase_ = imread(_UpperCamelCase , 0 )
# Test for get_neighbors_pixel function() return not None
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = image[x_coordinate][y_coordinate]
UpperCamelCase_ = lbp.get_neighbors_pixel(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
UpperCamelCase_ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
UpperCamelCase_ = lbp.local_binary_value(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
assert lbp_image.any()
| 359 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
for param in module.parameters():
UpperCamelCase_ = False
def lowerCAmelCase_ ( ) -> Dict:
UpperCamelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCamelCase_ = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
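# Comment-only usage sketch for the device helper above (name obfuscated here):
#     device = get_device()   # "cuda" when available, else "mps"/"cpu" with a warning
#     model.to(device)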
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Union[str, Any]:
UpperCamelCase_ = plt.imshow(UpperCamelCase_ )
fig.axes.get_xaxis().set_visible(UpperCamelCase_ )
fig.axes.get_yaxis().set_visible(UpperCamelCase_ )
plt.show()
def lowerCAmelCase_ ( ) -> List[str]:
UpperCamelCase_ = datetime.now()
UpperCamelCase_ = current_time.strftime("%H:%M:%S" )
return timestamp
| 328 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = IFInpaintingPipeline
_UpperCamelCase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
_UpperCamelCase : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_UpperCamelCase : Optional[Any] = PipelineTesterMixin.required_optional_params - {'latents'}
def lowercase ( self: str ) -> Dict:
"""simple docstring"""
return self._get_dummy_components()
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int=0 ) -> List[str]:
"""simple docstring"""
if str(__lowerCAmelCase ).startswith("mps" ):
UpperCamelCase_ = torch.manual_seed(__lowerCAmelCase )
else:
UpperCamelCase_ = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
UpperCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
UpperCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
UpperCamelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase ( self: List[str] ) -> int:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowercase ( self: Union[str, Any] ) -> int:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def lowercase ( self: List[str] ) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase ( self: int ) -> Optional[Any]:
"""simple docstring"""
self._test_save_load_local()
def lowercase ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 360 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = '▁'
_UpperCAmelCase = {'vocab_file': 'spiece.model'}
_UpperCAmelCase = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
_UpperCAmelCase = {
'google/pegasus-xsum': 5_1_2,
}
_UpperCAmelCase = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[int] = ['''input_ids''', '''attention_mask''']
def __init__( self: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: str="<pad>" , _SCREAMING_SNAKE_CASE: Optional[Any]="</s>" , _SCREAMING_SNAKE_CASE: Any="<unk>" , _SCREAMING_SNAKE_CASE: int="<mask_2>" , _SCREAMING_SNAKE_CASE: List[Any]="<mask_1>" , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: Optional[int]=103 , _SCREAMING_SNAKE_CASE: Optional[Dict[str, Any]] = None , **_SCREAMING_SNAKE_CASE: Dict , ) -> None:
"""simple docstring"""
UpperCamelCase_ = offset
if additional_special_tokens is not None:
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError(
f'''additional_special_tokens should be of type {type(_SCREAMING_SNAKE_CASE )}, but is'''
f''' {type(_SCREAMING_SNAKE_CASE )}''' )
UpperCamelCase_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(_SCREAMING_SNAKE_CASE ) , self.offset - 1 )
]
if len(set(_SCREAMING_SNAKE_CASE ) ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
UpperCamelCase_ = additional_special_tokens_extended
else:
UpperCamelCase_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
UpperCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token_sent=_SCREAMING_SNAKE_CASE , offset=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = mask_token_sent
UpperCamelCase_ = vocab_file
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
# add special tokens to encoder dict
UpperCamelCase_ = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
UpperCamelCase_ = {v: k for k, v in self.encoder.items()}
@property
def lowercase ( self: Dict ) -> int:
"""simple docstring"""
return len(self.sp_model ) + self.offset
def lowercase ( self: int ) -> Dict[str, int]:
"""simple docstring"""
UpperCamelCase_ = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.__dict__.copy()
UpperCamelCase_ = None
return state
def __setstate__( self: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase_ = {}
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: str ) -> int:
"""simple docstring"""
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
UpperCamelCase_ = self.sp_model.piece_to_id(_SCREAMING_SNAKE_CASE )
return sp_id + self.offset
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: int ) -> str:
"""simple docstring"""
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
UpperCamelCase_ = self.sp_model.IdToPiece(index - self.offset )
return token
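# Sketch of the resulting id space (figures assume the default offset of 103):
# id 0 -> pad, id 1 -> eos, ids 2..104 -> the mask/<unk_*> pretraining placeholders,
# and a plain SentencePiece id k surfaces as tokenizer id k + offset, which is exactly
# the shift applied by the two conversion methods above.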
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = []
UpperCamelCase_ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
UpperCamelCase_ = []
else:
current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
return out_string.strip()
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
return 1
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: str ) -> str:
"""simple docstring"""
UpperCamelCase_ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: List , _SCREAMING_SNAKE_CASE: Optional[List] = None , _SCREAMING_SNAKE_CASE: bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(_SCREAMING_SNAKE_CASE )
elif token_ids_a is None:
return self._special_token_mask(_SCREAMING_SNAKE_CASE ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any]=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase_ = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , "wb" ) as fi:
UpperCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 328 | 0 |
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
'Extract some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer-Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
_UpperCAmelCase = parser.parse_args()
if args.model_type == "roberta":
_UpperCAmelCase = RobertaForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = 'roberta'
elif args.model_type == "gpt2":
_UpperCAmelCase = GPTaLMHeadModel.from_pretrained(args.model_name)
_UpperCAmelCase = 'transformer'
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_UpperCAmelCase = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_UpperCAmelCase = f'''{prefix}.embeddings.{w}.weight'''
_UpperCAmelCase = state_dict[param_name]
for w in ["weight", "bias"]:
_UpperCAmelCase = f'''{prefix}.embeddings.LayerNorm.{w}'''
_UpperCAmelCase = state_dict[param_name]
# Transformer Blocks #
_UpperCAmelCase = 0
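# Layer selection for distillation: each teacher layer index below is copied into the
# next student slot, so the six indices chosen here become student layers 0..5
# (std_idx counts how many student layers have been filled so far).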
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
_UpperCAmelCase = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_UpperCAmelCase = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f'''lm_head.dense.{w}''']
_UpperCAmelCase = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f'''{prefix}.ln_f.{w}''']
_UpperCAmelCase = state_dict['lm_head.weight']
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 361 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
# Replace this module with a lazy module so submodules are only imported on first attribute access
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 328 | 0 |
"""simple docstring"""
def lowerCAmelCase_ ( UpperCamelCase_ ) -> str:
UpperCamelCase_ = ""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
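# Example: remove_duplicates("HELLO WORLD") keeps only the first occurrence of each
# letter (spaces always pass through), yielding "HELO WRD".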
def lowerCAmelCase_ ( UpperCamelCase_ ) -> dict[str, str]:
UpperCamelCase_ = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
UpperCamelCase_ = remove_duplicates(key.upper() )
UpperCamelCase_ = len(lowercase_ )
# First fill cipher with key characters
UpperCamelCase_ = {alphabet[i]: char for i, char in enumerate(lowercase_ )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(lowercase_ ) , 26 ):
UpperCamelCase_ = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
UpperCamelCase_ = alphabet[i - offset]
UpperCamelCase_ = char
return cipher_alphabet
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> str:
return "".join(cipher_map.get(lowercase_ , lowercase_ ) for ch in message.upper() )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> str:
UpperCamelCase_ = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(lowercase_ , lowercase_ ) for ch in message.upper() )
def lowerCAmelCase_ ( ) -> None:
UpperCamelCase_ = input("Enter message to encode or decode: " ).strip()
UpperCamelCase_ = input("Enter keyword: " ).strip()
UpperCamelCase_ = input("Encipher or decipher? E/D:" ).strip()[0].lower()
try:
UpperCamelCase_ = {"e": encipher, "d": decipher}[option]
except KeyError:
raise KeyError("invalid input option" )
UpperCamelCase_ = create_cipher_map(lowercase_ )
print(func(lowercase_ , lowercase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 362 |
import argparse
import json
from tqdm import tqdm
def lowerCAmelCase_ ( ) -> Tuple:
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=UpperCamelCase_ , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=UpperCamelCase_ , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=UpperCamelCase_ , help="where to store parsed gold_data_path file" , )
UpperCamelCase_ = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
UpperCamelCase_ = json.load(UpperCamelCase_ )
for dpr_record in tqdm(UpperCamelCase_ ):
UpperCamelCase_ = dpr_record["question"]
UpperCamelCase_ = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(UpperCamelCase_ ) + "\n" )
if __name__ == "__main__":
main()
| 328 | 0 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass(frozen=lowerCAmelCase_ )
class _UpperCamelCase :
_UpperCamelCase : str
_UpperCamelCase : str
_UpperCamelCase : Optional[str] = None
_UpperCamelCase : Optional[str] = None
_UpperCamelCase : Optional[str] = None
@dataclass(frozen=lowerCAmelCase_ )
class _UpperCamelCase :
_UpperCamelCase : List[int]
_UpperCamelCase : Optional[List[int]] = None
_UpperCamelCase : Optional[List[int]] = None
_UpperCamelCase : Optional[Union[int, float]] = None
_UpperCamelCase : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : List[InputFeatures]
def __init__( self: str , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: PreTrainedTokenizer , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[int] = None , _SCREAMING_SNAKE_CASE: str=False , _SCREAMING_SNAKE_CASE: bool = False , ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = hans_processors[task]()
UpperCamelCase_ = os.path.join(
_a , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(_a ) , _a , ) , )
UpperCamelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase_ , UpperCamelCase_ = label_list[2], label_list[1]
UpperCamelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCamelCase_ = cached_features_file + ".lock"
with FileLock(_a ):
if os.path.exists(_a ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
UpperCamelCase_ = torch.load(_a )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
UpperCamelCase_ = (
processor.get_dev_examples(_a ) if evaluate else processor.get_train_examples(_a )
)
logger.info("Training examples: %s" , len(_a ) )
UpperCamelCase_ = hans_convert_examples_to_features(_a , _a , _a , _a )
logger.info("Saving features into cached file %s" , _a )
torch.save(self.features , _a )
def __len__( self: Union[str, Any] ) -> Tuple:
"""simple docstring"""
return len(self.features )
def __getitem__( self: int , _SCREAMING_SNAKE_CASE: int ) -> Union[str, Any]:
"""simple docstring"""
return self.features[i]
def lowercase ( self: str ) -> Any:
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class _UpperCamelCase :
_UpperCamelCase : List[InputFeatures]
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: PreTrainedTokenizer , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[int] = 128 , _SCREAMING_SNAKE_CASE: Union[str, Any]=False , _SCREAMING_SNAKE_CASE: bool = False , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = hans_processors[task]()
UpperCamelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCamelCase_ , UpperCamelCase_ = label_list[2], label_list[1]
UpperCamelCase_ = label_list
UpperCamelCase_ = processor.get_dev_examples(_a ) if evaluate else processor.get_train_examples(_a )
UpperCamelCase_ = hans_convert_examples_to_features(_a , _a , _a , _a )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(_a )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCamelCase_ = tf.data.Dataset.from_generator(
_a , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def lowercase ( self: List[str] ) -> int:
"""simple docstring"""
return self.dataset
def __len__( self: int ) -> Optional[Any]:
"""simple docstring"""
return len(self.features )
def __getitem__( self: List[str] , _SCREAMING_SNAKE_CASE: Tuple ) -> Dict:
"""simple docstring"""
return self.features[i]
def lowercase ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.label_list
class _UpperCamelCase ( lowerCAmelCase_ ):
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: List[str] ) -> Union[str, Any]:
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_a , "heuristics_train_set.txt" ) ) , "train" )
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] ) -> Any:
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(_a , "heuristics_evaluation_set.txt" ) ) , "dev" )
def lowercase ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = []
for i, line in enumerate(_a ):
if i == 0:
continue
UpperCamelCase_ = "%s-%s" % (set_type, line[0])
UpperCamelCase_ = line[5]
UpperCamelCase_ = line[6]
UpperCamelCase_ = line[7][2:] if line[7].startswith("ex" ) else line[7]
UpperCamelCase_ = line[0]
examples.append(InputExample(guid=_a , text_a=_a , text_b=_a , label=_a , pairID=_a ) )
return examples
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Any:
UpperCamelCase_ = {label: i for i, label in enumerate(UpperCamelCase__ )}
UpperCamelCase_ = []
for ex_index, example in tqdm.tqdm(enumerate(UpperCamelCase__ ) , desc="convert examples to features" ):
if ex_index % 10000 == 0:
logger.info("Writing example %d" % (ex_index) )
UpperCamelCase_ = tokenizer(
example.text_a , example.text_b , add_special_tokens=UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" , truncation=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , )
UpperCamelCase_ = label_map[example.label] if example.label in label_map else 0
UpperCamelCase_ = int(example.pairID )
features.append(InputFeatures(**UpperCamelCase__ , label=UpperCamelCase__ , pairID=UpperCamelCase__ ) )
for i, example in enumerate(examples[:5] ):
logger.info("*** Example ***" )
logger.info(F'''guid: {example}''' )
logger.info(F'''features: {features[i]}''' )
return features
_UpperCAmelCase = {
'hans': 3,
}
_UpperCAmelCase = {
'hans': HansProcessor,
}
| 363 |
import requests
from bsa import BeautifulSoup
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> str:
UpperCamelCase_ = BeautifulSoup(requests.get(UpperCamelCase_ , params=UpperCamelCase_ ).content , "html.parser" )
UpperCamelCase_ = soup.find("div" , attrs={"class": "gs_ri"} )
UpperCamelCase_ = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_UpperCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 3_0,
'pages': '3979-3990',
'year': 2_0_1_8,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 328 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = TextToVideoSDPipeline
_UpperCamelCase : str = TEXT_TO_IMAGE_PARAMS
_UpperCamelCase : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
_UpperCamelCase : List[Any] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def lowercase ( self: str ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCamelCase_ = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
UpperCamelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCamelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
UpperCamelCase_ = CLIPTextModel(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCamelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: int=0 ) -> Optional[Any]:
"""simple docstring"""
if str(_SCREAMING_SNAKE_CASE ).startswith("mps" ):
UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def lowercase ( self: Optional[int] ) -> int:
"""simple docstring"""
UpperCamelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = TextToVideoSDPipeline(**_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = "np"
UpperCamelCase_ = sd_pipe(**_SCREAMING_SNAKE_CASE ).frames
UpperCamelCase_ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
UpperCamelCase_ = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase ( self: Tuple ) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase ( self: Tuple ) -> int:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1e-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def lowercase ( self: str ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def lowercase ( self: List[str] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def lowercase ( self: Tuple ) -> List[str]:
"""simple docstring"""
pass
def lowercase ( self: Tuple ) -> Any:
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class _UpperCamelCase ( unittest.TestCase ):
def lowercase ( self: int ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
UpperCamelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCamelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCamelCase_ = pipe.to("cuda" )
UpperCamelCase_ = "Spiderman is surfing"
UpperCamelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCamelCase_ = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="pt" ).frames
UpperCamelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def lowercase ( self: str ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
UpperCamelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCamelCase_ = pipe.to("cuda" )
UpperCamelCase_ = "Spiderman is surfing"
UpperCamelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCamelCase_ = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="pt" ).frames
UpperCamelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 364 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self: List[str] , *,
_SCREAMING_SNAKE_CASE: int = 4 , _SCREAMING_SNAKE_CASE: int = 768 , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase_ = nn.Parameter(torch.zeros(_SCREAMING_SNAKE_CASE ) )
# parameters for additional clip time embeddings
UpperCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# parameters for encoder hidden states
UpperCamelCase_ = clip_extra_context_tokens
UpperCamelCase_ = nn.Linear(
_SCREAMING_SNAKE_CASE , self.clip_extra_context_tokens * cross_attention_dim )
UpperCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = nn.LayerNorm(_SCREAMING_SNAKE_CASE )
def lowercase ( self: Optional[int] , *, _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple ) -> str:
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
UpperCamelCase_ = image_embeddings.shape[0]
UpperCamelCase_ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
UpperCamelCase_ = classifier_free_guidance_embeddings.expand(
_SCREAMING_SNAKE_CASE , -1 )
UpperCamelCase_ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
UpperCamelCase_ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
UpperCamelCase_ = self.embedding_proj(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.clip_image_embeddings_project_to_time_embeddings(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
UpperCamelCase_ = self.clip_extra_context_tokens_proj(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = clip_extra_context_tokens.reshape(_SCREAMING_SNAKE_CASE , -1 , self.clip_extra_context_tokens )
UpperCamelCase_ = clip_extra_context_tokens.permute(0 , 2 , 1 )
UpperCamelCase_ = self.encoder_hidden_states_proj(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.text_encoder_hidden_states_norm(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 328 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
_UpperCAmelCase = HfArgumentParser(InitializationArguments)
_UpperCAmelCase = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
_UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
_UpperCAmelCase = {
"vocab_size": len(tokenizer),
"scale_attn_by_inverse_layer_idx": True,
"reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
_UpperCAmelCase = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
_UpperCAmelCase = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 365 |
from functools import lru_cache
def lowerCAmelCase_ ( UpperCamelCase_ ) -> set:
UpperCamelCase_ = 2
UpperCamelCase_ = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(UpperCamelCase_ )
if n > 1:
factors.add(UpperCamelCase_ )
return factors
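# Example: unique_prime_factors(100) == {2, 5}, since 100 = 2**2 * 5**2,
# so upf_len(100) == 2.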
@lru_cache
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
return len(unique_prime_factors(UpperCamelCase_ ) )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> bool:
return len(set(UpperCamelCase_ ) ) in (0, 1)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> list:
UpperCamelCase_ = 2
while True:
# Increment each value of a generated range
UpperCamelCase_ = [base + i for i in range(UpperCamelCase_ )]
# Run elements through our unique_prime_factors function
# Append our target number to the end.
UpperCamelCase_ = [upf_len(UpperCamelCase_ ) for x in group]
checker.append(UpperCamelCase_ )
# If all numbers in the list are equal, return the group variable.
if equality(UpperCamelCase_ ):
return group
# Increment our base variable by 1
base += 1
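# Reference values (from the Project Euler 47 statement): run(3) should return
# [644, 645, 646], the first three consecutive integers with three distinct prime
# factors each; solution(4) is expected to yield 134043.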
def lowerCAmelCase_ ( UpperCamelCase_ = 4 ) -> int:
UpperCamelCase_ = run(UpperCamelCase_ )
return results[0] if len(UpperCamelCase_ ) else None
if __name__ == "__main__":
print(solution())
| 328 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
_UpperCAmelCase = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_UpperCAmelCase = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_UpperCAmelCase = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
_UpperCAmelCase = OrderedDict(
[
# Model for Image-classification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
_UpperCAmelCase = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
_UpperCAmelCase = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
_UpperCAmelCase = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
_UpperCAmelCase = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
_UpperCAmelCase = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
_UpperCAmelCase = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
_UpperCAmelCase = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
_UpperCAmelCase = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
_UpperCAmelCase = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
_UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_UpperCAmelCase = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class _UpperCamelCase ( _BaseAutoModelClass ):
_UpperCamelCase : Dict = FLAX_MODEL_MAPPING
_UpperCAmelCase = auto_class_update(FlaxAutoModel)
class _UpperCamelCase ( _BaseAutoModelClass ):
_UpperCamelCase : Optional[int] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_UpperCAmelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class _UpperCamelCase ( _BaseAutoModelClass ):
_UpperCamelCase : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_UpperCAmelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class _UpperCamelCase ( _BaseAutoModelClass ):
_UpperCamelCase : Tuple = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_UpperCAmelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class _UpperCamelCase ( _BaseAutoModelClass ):
_UpperCamelCase : str = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCAmelCase = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class _UpperCamelCase ( _BaseAutoModelClass ):
_UpperCamelCase : Dict = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_UpperCAmelCase = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class _UpperCamelCase ( _BaseAutoModelClass ):
_UpperCamelCase : List[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_UpperCAmelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class _UpperCamelCase ( _BaseAutoModelClass ):
_UpperCamelCase : List[str] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_UpperCAmelCase = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class _UpperCamelCase ( _BaseAutoModelClass ):
_UpperCamelCase : int = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_UpperCAmelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class _UpperCamelCase ( _BaseAutoModelClass ):
_UpperCamelCase : Dict = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_UpperCAmelCase = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class _UpperCamelCase ( _BaseAutoModelClass ):
_UpperCamelCase : int = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_UpperCAmelCase = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class _UpperCamelCase ( _BaseAutoModelClass ):
_UpperCamelCase : Union[str, Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_UpperCAmelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class _UpperCamelCase ( _BaseAutoModelClass ):
_UpperCamelCase : Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_UpperCAmelCase = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 366 |
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
UpperCamelCase_ = len(UpperCamelCase_ )
UpperCamelCase_ = len(matrix[0] )
UpperCamelCase_ = min(UpperCamelCase_ , UpperCamelCase_ )
for row in range(UpperCamelCase_ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , UpperCamelCase_ ):
UpperCamelCase_ = matrix[col][row] / matrix[row][row]
for i in range(UpperCamelCase_ , UpperCamelCase_ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
UpperCamelCase_ = True
for i in range(row + 1 , UpperCamelCase_ ):
if matrix[i][row] != 0:
UpperCamelCase_ , UpperCamelCase_ = matrix[i], matrix[row]
UpperCamelCase_ = False
break
if reduce:
rank -= 1
for i in range(UpperCamelCase_ ):
UpperCamelCase_ = matrix[i][rank]
# Step back so the same row is examined again after the swap; note that decrementing
# the loop variable has no effect inside a Python for-loop (it is reassigned next iteration)
row -= 1
return rank
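# Example: [[1.0, 2.0], [2.0, 4.0]] has rank 1 (the rows are linearly dependent),
# while [[1.0, 2.0], [3.0, 4.0]] has rank 2.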
if __name__ == "__main__":
import doctest
doctest.testmod()
| 328 | 0 |
def lowerCAmelCase_ ( UpperCamelCase_ = 1000 ) -> int:
UpperCamelCase_ = 2**power
UpperCamelCase_ = str(UpperCamelCase__ )
UpperCamelCase_ = list(UpperCamelCase__ )
UpperCamelCase_ = 0
for i in list_num:
sum_of_num += int(UpperCamelCase__ )
return sum_of_num
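# Worked example: for power = 15, 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so the
# function returns 26 (the warm-up case from the Project Euler 16 statement).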
if __name__ == "__main__":
_UpperCAmelCase = int(input('Enter the power of 2: ').strip())
print('2 ^ ', power, ' = ', 2**power)
_UpperCAmelCase = solution(power)
print('Sum of the digits is: ', result)
| 367 |
import math
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
if 0 not in (x, y):
# We use the identity log10(x^y) = y * log10(x); comparing y * log10(x) therefore compares the powers.
return y * math.logaa(UpperCamelCase_ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
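# Worked example: comparing 2^10 with 3^5 via the identity above gives
# 10 * log10(2) ~ 3.010 versus 5 * log10(3) ~ 2.386, so 2^10 (1024) beats 3^5 (243).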
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
_UpperCAmelCase = 'Enter the base and the power separated by a comma: '
_UpperCAmelCase , _UpperCAmelCase = map(int, input(prompt).split(','))
_UpperCAmelCase , _UpperCAmelCase = map(int, input(prompt).split(','))
# We find the log of each number, using the function res(), which takes two
# arguments.
_UpperCAmelCase = res(xa, ya)
_UpperCAmelCase = res(xa, ya)
# We check for the largest number
if resa > resa:
print('Largest number is', xa, '^', ya)
elif resa > resa:
print('Largest number is', xa, '^', ya)
else:
print('Both are equal')
| 328 | 0 |
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> int:
return number | (1 << position)
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> int:
return number & ~(1 << position)
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> int:
return number ^ (1 << position)
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> bool:
return ((number >> position) & 1) == 1
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> int:
return int((number & (1 << position)) != 0 )
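# Worked examples for the bit helpers above, with number = 0b1101 (13):
# number | (1 << 1) == 0b1111 (15) -- set bit 1
# number & ~(1 << 2) == 0b1001 (9) -- clear bit 2
# number ^ (1 << 0) == 0b1100 (12) -- flip bit 0
# ((number >> 3) & 1) == 1 -- bit 3 is set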
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
_UpperCAmelCase = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> List[Any]:
if isinstance(UpperCamelCase_ , torch.Tensor ):
return image
elif isinstance(UpperCamelCase_ , PIL.Image.Image ):
UpperCamelCase_ = [image]
UpperCamelCase_ = [trans(img.convert("RGB" ) ) for img in image]
UpperCamelCase_ = torch.stack(UpperCamelCase_ )
return image
class _UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self: List[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Dict ) -> str:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCamelCase_ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Dict ) -> Optional[Any]:
"""simple docstring"""
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' )
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] ) -> int:
"""simple docstring"""
UpperCamelCase_ = min(int(num_inference_steps * strength ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int]=None ) -> List[Any]:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_SCREAMING_SNAKE_CASE )}''' )
UpperCamelCase_ = image.to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(_SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCamelCase_ = init_latents.shape
UpperCamelCase_ = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
# get latents
print("add noise to latents at timestep" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.scheduler.add_noise(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = init_latents
return latents
@torch.no_grad()
def __call__( self: Dict , _SCREAMING_SNAKE_CASE: Union[torch.FloatTensor, PIL.Image.Image] = None , _SCREAMING_SNAKE_CASE: float = 0.8 , _SCREAMING_SNAKE_CASE: int = 1 , _SCREAMING_SNAKE_CASE: Optional[Union[torch.Generator, List[torch.Generator]]] = None , _SCREAMING_SNAKE_CASE: float = 0.0 , _SCREAMING_SNAKE_CASE: int = 50 , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[str] = "pil" , _SCREAMING_SNAKE_CASE: bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
self.check_inputs(_SCREAMING_SNAKE_CASE )
# 2. Preprocess image
UpperCamelCase_ = preprocess(_SCREAMING_SNAKE_CASE )
# 3. set timesteps
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=self.device )
UpperCamelCase_ , UpperCamelCase_ = self.get_timesteps(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.device )
UpperCamelCase_ = timesteps[:1].repeat(_SCREAMING_SNAKE_CASE )
# 4. Prepare latent variables
UpperCamelCase_ = self.prepare_latents(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.unet.dtype , self.device , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = latents
# 5. Denoising loop
for t in self.progress_bar(_SCREAMING_SNAKE_CASE ):
# 1. predict noise model_output
UpperCamelCase_ = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCamelCase_ = self.scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , use_clipped_model_output=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample
UpperCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase_ = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
| 328 | 0 |
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Optional[Any]:
UpperCamelCase_ = MobileNetVaConfig(layer_norm_eps=0.0_01 )
if "_quant" in model_name:
raise ValueError("Quantized models are not supported." )
UpperCamelCase_ = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$" , _snake_case )
if matches:
UpperCamelCase_ = float(matches[1] )
UpperCamelCase_ = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
UpperCamelCase_ = 1001
UpperCamelCase_ = "imagenet-1k-id2label.json"
UpperCamelCase_ = "huggingface/label-files"
UpperCamelCase_ = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type="dataset" ) , "r" ) )
UpperCamelCase_ = {int(_snake_case ) + 1: v for k, v in idalabel.items()}
UpperCamelCase_ = "background"
UpperCamelCase_ = idalabel
UpperCamelCase_ = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase_ ( ) -> str:
UpperCamelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCamelCase_ = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ) -> Optional[int]:
UpperCamelCase_ = get_mobilenet_va_config(_snake_case )
# Load 🤗 model
UpperCamelCase_ = MobileNetVaForImageClassification(_snake_case ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(_snake_case , _snake_case , _snake_case )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
UpperCamelCase_ = MobileNetVaImageProcessor(
crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 32} , )
UpperCamelCase_ = image_processor(images=prepare_img() , return_tensors="pt" )
UpperCamelCase_ = model(**_snake_case )
UpperCamelCase_ = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
UpperCamelCase_ = torch.tensor([-4.17_39, -1.12_33, 3.12_05] )
elif model_name == "mobilenet_v1_0.75_192":
UpperCamelCase_ = torch.tensor([-3.94_40, -2.31_41, -0.33_33] )
else:
UpperCamelCase_ = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , _snake_case , atol=1e-4 )
Path(_snake_case ).mkdir(exist_ok=_snake_case )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_snake_case )
if push_to_hub:
print("Pushing to the hub..." )
UpperCamelCase_ = "google/" + model_name
image_processor.push_to_hub(_snake_case )
model.push_to_hub(_snake_case )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='mobilenet_v1_1.0_224',
type=str,
help='Name of the MobileNetV1 model you\'d like to convert. Should be in the form \'mobilenet_v1_<depth>_<size>\'.',
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_UpperCAmelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 369 |
import re
from filelock import FileLock
try:
import nltk
_UpperCAmelCase = True
except (ImportError, ModuleNotFoundError):
_UpperCAmelCase = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> str:
re.sub("<n>" , "" , UpperCamelCase_ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCamelCase_ ) )
| 328 | 0 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
_UpperCAmelCase = logging.getLogger(__name__)
_UpperCAmelCase = tf.data.AUTOTUNE
def lowerCAmelCase_ ( ):
UpperCamelCase_ = argparse.ArgumentParser(description="Train a masked language model on TPU." )
parser.add_argument(
"--pretrained_model_config" , type=UpperCamelCase_ , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
parser.add_argument(
"--tokenizer" , type=UpperCamelCase_ , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
parser.add_argument(
"--per_replica_batch_size" , type=UpperCamelCase_ , default=8 , help="Batch size per TPU core." , )
parser.add_argument(
"--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
parser.add_argument(
"--tpu_name" , type=UpperCamelCase_ , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
parser.add_argument(
"--tpu_zone" , type=UpperCamelCase_ , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
parser.add_argument(
"--gcp_project" , type=UpperCamelCase_ , help="Google cloud project name. Only used for non-Colab TPU nodes." )
parser.add_argument(
"--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
parser.add_argument(
"--train_dataset" , type=UpperCamelCase_ , help="Path to training dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--shuffle_buffer_size" , type=UpperCamelCase_ , default=2**18 , help="Size of the shuffle buffer (in samples)" , )
parser.add_argument(
"--eval_dataset" , type=UpperCamelCase_ , help="Path to evaluation dataset to load. If the path begins with `gs://`"
" then the dataset will be loaded from a Google Cloud Storage bucket." , )
parser.add_argument(
"--num_epochs" , type=UpperCamelCase_ , default=1 , help="Number of epochs to train for." , )
parser.add_argument(
"--learning_rate" , type=UpperCamelCase_ , default=1e-4 , help="Learning rate to use for training." , )
parser.add_argument(
"--weight_decay_rate" , type=UpperCamelCase_ , default=1e-3 , help="Weight decay rate to use for training." , )
parser.add_argument(
"--max_length" , type=UpperCamelCase_ , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
parser.add_argument(
"--mlm_probability" , type=UpperCamelCase_ , default=0.15 , help="Fraction of tokens to mask during training." , )
parser.add_argument("--output_dir" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="Path to save model checkpoints to." )
parser.add_argument("--hub_model_id" , type=UpperCamelCase_ , help="Model ID to upload to on the Hugging Face Hub." )
UpperCamelCase_ = parser.parse_args()
return args
def lowerCAmelCase_ ( UpperCamelCase_ ):
try:
if args.tpu_name:
UpperCamelCase_ = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
UpperCamelCase_ = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
"--gcp_project. When running on a TPU VM, use --tpu_name local." )
tf.config.experimental_connect_to_cluster(UpperCamelCase_ )
tf.tpu.experimental.initialize_tpu_system(UpperCamelCase_ )
return tpu
def lowerCAmelCase_ ( UpperCamelCase_ ):
UpperCamelCase_ = 0
for file in file_list:
UpperCamelCase_ = file.split("/" )[-1]
UpperCamelCase_ = re.search(r"-\d+-(\d+)\.tfrecord" , UpperCamelCase_ ).group(1 )
UpperCamelCase_ = int(UpperCamelCase_ )
num_samples += sample_count
return num_samples
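# The regex above expects shard names like "wiki-00000-8192.tfrecord" (a
# hypothetical example), where the trailing number is that shard's sample count:
# re.search(r"-\d+-(\d+)\.tfrecord", "wiki-00000-8192.tfrecord").group(1) # -> "8192"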
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ):
UpperCamelCase_ = count_samples(UpperCamelCase_ )
UpperCamelCase_ = tf.data.Dataset.from_tensor_slices(UpperCamelCase_ )
if shuffle:
UpperCamelCase_ = dataset.shuffle(len(UpperCamelCase_ ) )
UpperCamelCase_ = tf.data.TFRecordDataset(UpperCamelCase_ , num_parallel_reads=UpperCamelCase_ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
UpperCamelCase_ = dataset.apply(tf.data.experimental.assert_cardinality(UpperCamelCase_ ) )
UpperCamelCase_ = dataset.map(UpperCamelCase_ , num_parallel_calls=UpperCamelCase_ )
if shuffle:
assert shuffle_buffer_size is not None
UpperCamelCase_ = dataset.shuffle(args.shuffle_buffer_size )
UpperCamelCase_ = dataset.batch(UpperCamelCase_ , drop_remainder=UpperCamelCase_ )
UpperCamelCase_ = dataset.map(UpperCamelCase_ , num_parallel_calls=UpperCamelCase_ )
UpperCamelCase_ = dataset.prefetch(UpperCamelCase_ )
return dataset
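# Pipeline order above: shuffle shard files -> read TFRecords -> assert the known
# cardinality -> per-example decode -> shuffle samples -> batch -> per-batch
# masking -> prefetch. Mapping the mask function after batching keeps it vectorized.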
def lowerCAmelCase_ ( UpperCamelCase_ ):
if not args.no_tpu:
UpperCamelCase_ = initialize_tpu(UpperCamelCase_ )
UpperCamelCase_ = tf.distribute.TPUStrategy(UpperCamelCase_ )
else:
UpperCamelCase_ = tf.distribute.OneDeviceStrategy(device="/gpu:0" )
if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
UpperCamelCase_ = AutoTokenizer.from_pretrained(args.tokenizer )
UpperCamelCase_ = AutoConfig.from_pretrained(args.pretrained_model_config )
UpperCamelCase_ = tokenizer.vocab_size
UpperCamelCase_ = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) )
if not training_records:
raise ValueError(F'''No .tfrecord files found in {args.train_dataset}.''' )
UpperCamelCase_ = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) )
if not eval_records:
raise ValueError(F'''No .tfrecord files found in {args.eval_dataset}.''' )
UpperCamelCase_ = count_samples(UpperCamelCase_ )
UpperCamelCase_ = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
UpperCamelCase_ = steps_per_epoch * args.num_epochs
with strategy.scope():
UpperCamelCase_ = TFAutoModelForMaskedLM.from_config(UpperCamelCase_ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
UpperCamelCase_ = create_optimizer(
num_train_steps=UpperCamelCase_ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=UpperCamelCase_ , metrics=["accuracy"] )
def decode_fn(UpperCamelCase_ ):
UpperCamelCase_ = {
"input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(UpperCamelCase_ , UpperCamelCase_ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
UpperCamelCase_ = DataCollatorForLanguageModeling(
tokenizer=UpperCamelCase_ , mlm_probability=args.mlm_probability , mlm=UpperCamelCase_ , return_tensors="tf" )
def mask_with_collator(UpperCamelCase_ ):
# TF really needs an isin() function
UpperCamelCase_ = (
~tf.cast(batch["attention_mask"] , tf.bool )
| (batch["input_ids"] == tokenizer.cls_token_id)
| (batch["input_ids"] == tokenizer.sep_token_id)
)
UpperCamelCase_ = data_collator.tf_mask_tokens(
batch["input_ids"] , vocab_size=len(UpperCamelCase_ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=UpperCamelCase_ , )
return batch
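# The three OR-ed comparisons above emulate the missing `isin`: padding positions
# and CLS/SEP positions become True, so the collator never masks them. A hedged
# equivalent sketch using a hypothetical tensor of special ids:
# special_ids = tf.constant([tokenizer.cls_token_id, tokenizer.sep_token_id])
# special_tokens_mask = ~tf.cast(batch["attention_mask"], tf.bool) | tf.reduce_any(
# tf.equal(batch["input_ids"][..., None], special_ids), axis=-1)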
UpperCamelCase_ = args.per_replica_batch_size * strategy.num_replicas_in_sync
UpperCamelCase_ = prepare_dataset(
UpperCamelCase_ , decode_fn=UpperCamelCase_ , mask_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ , shuffle=UpperCamelCase_ , shuffle_buffer_size=args.shuffle_buffer_size , )
UpperCamelCase_ = prepare_dataset(
UpperCamelCase_ , decode_fn=UpperCamelCase_ , mask_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ , shuffle=UpperCamelCase_ , )
UpperCamelCase_ = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=UpperCamelCase_ ) )
model.fit(
UpperCamelCase_ , validation_data=UpperCamelCase_ , epochs=args.num_epochs , callbacks=UpperCamelCase_ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
_UpperCAmelCase = parse_args()
main(args)
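# Example invocation (the script name and bucket paths here are hypothetical):
# python train_mlm_tpu.py --tokenizer unigram-tokenizer-wikitext \
# --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
# --output_dir ./mlm-checkpoints --bfloat16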
| 370 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DiTPipeline
_UpperCamelCase : Any = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Dict = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_UpperCamelCase : Optional[int] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Dict = False
def lowercase ( self: str ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase_ = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = AutoencoderKL()
UpperCamelCase_ = DDIMScheduler()
UpperCamelCase_ = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[str]=0 ) -> Dict:
"""simple docstring"""
if str(_SCREAMING_SNAKE_CASE ).startswith("mps" ):
UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowercase ( self: Any ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = "cpu"
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = pipe(**_SCREAMING_SNAKE_CASE ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
UpperCamelCase_ = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
UpperCamelCase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 )
def lowercase ( self: Optional[int] ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase ( self: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class _UpperCamelCase ( unittest.TestCase ):
def lowercase ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
UpperCamelCase_ = ["vase", "umbrella", "white shark", "white wolf"]
UpperCamelCase_ = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def lowercase ( self: int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
UpperCamelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
UpperCamelCase_ = ["vase", "umbrella"]
UpperCamelCase_ = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
| 328 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _UpperCamelCase ( _a ):
_UpperCamelCase : Dict = '''realm'''
def __init__( self: str , _SCREAMING_SNAKE_CASE: List[Any]=30522 , _SCREAMING_SNAKE_CASE: List[Any]=768 , _SCREAMING_SNAKE_CASE: List[Any]=128 , _SCREAMING_SNAKE_CASE: List[str]=12 , _SCREAMING_SNAKE_CASE: Tuple=12 , _SCREAMING_SNAKE_CASE: Any=8 , _SCREAMING_SNAKE_CASE: str=3072 , _SCREAMING_SNAKE_CASE: Optional[Any]="gelu_new" , _SCREAMING_SNAKE_CASE: Any=0.1 , _SCREAMING_SNAKE_CASE: List[str]=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=512 , _SCREAMING_SNAKE_CASE: Dict=2 , _SCREAMING_SNAKE_CASE: Tuple=0.02 , _SCREAMING_SNAKE_CASE: Union[str, Any]=1e-12 , _SCREAMING_SNAKE_CASE: int=256 , _SCREAMING_SNAKE_CASE: List[Any]=10 , _SCREAMING_SNAKE_CASE: int=1e-3 , _SCREAMING_SNAKE_CASE: Tuple=5 , _SCREAMING_SNAKE_CASE: Union[str, Any]=320 , _SCREAMING_SNAKE_CASE: int=13353718 , _SCREAMING_SNAKE_CASE: Optional[int]=5000 , _SCREAMING_SNAKE_CASE: Optional[int]=1 , _SCREAMING_SNAKE_CASE: Optional[int]=0 , _SCREAMING_SNAKE_CASE: List[str]=2 , **_SCREAMING_SNAKE_CASE: List[str] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
# Common config
UpperCamelCase_ = vocab_size
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = hidden_size
UpperCamelCase_ = retriever_proj_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = num_candidates
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = initializer_range
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = layer_norm_eps
# Reader config
UpperCamelCase_ = span_hidden_size
UpperCamelCase_ = max_span_width
UpperCamelCase_ = reader_layer_norm_eps
UpperCamelCase_ = reader_beam_size
UpperCamelCase_ = reader_seq_len
# Retrieval config
UpperCamelCase_ = num_block_records
UpperCamelCase_ = searcher_beam_size
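# A minimal usage sketch, keeping this file's obfuscated class name (the override
# values are hypothetical):
# config = _UpperCamelCase(num_candidates=4, reader_beam_size=3)
# print(config.hidden_size) # -> 768, the default defined above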
| 371 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _UpperCamelCase :
def __init__( self: str ) -> Any:
"""simple docstring"""
UpperCamelCase_ = ""
UpperCamelCase_ = ""
UpperCamelCase_ = []
UpperCamelCase_ = 0
UpperCamelCase_ = 256
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = 0
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Dict ) -> str:
"""simple docstring"""
UpperCamelCase_ = cva.imread(_SCREAMING_SNAKE_CASE , 0 )
UpperCamelCase_ = copy.deepcopy(self.img )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
UpperCamelCase_ = np.sum(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCamelCase_ = x[i] / self.k
self.sk += prk
UpperCamelCase_ = (self.L - 1) * self.sk
if self.rem != 0:
UpperCamelCase_ = int(last % last )
UpperCamelCase_ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = int(np.ma.count(self.img ) / self.img[1].size )
UpperCamelCase_ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCamelCase_ = self.img[j][i]
if num != self.last_list[num]:
UpperCamelCase_ = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def lowercase ( self: Any ) -> Optional[Any]:
"""simple docstring"""
plt.hist(self.img.ravel() , 256 , [0, 256] )
def lowercase ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
_UpperCAmelCase = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
_UpperCAmelCase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 328 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
_UpperCAmelCase = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
for attribute in key.split("." ):
UpperCamelCase_ = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
UpperCamelCase_ = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
UpperCamelCase_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
UpperCamelCase_ = value
elif weight_type == "weight_g":
UpperCamelCase_ = value
elif weight_type == "weight_v":
UpperCamelCase_ = value
elif weight_type == "bias":
UpperCamelCase_ = value
else:
UpperCamelCase_ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> str:
UpperCamelCase_ = []
UpperCamelCase_ = fairseq_model.state_dict()
UpperCamelCase_ = hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase_ = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
UpperCamelCase_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCamelCase_ = True
if "*" in mapped_key:
UpperCamelCase_ = name.split(__lowerCAmelCase )[0].split("." )[-2]
UpperCamelCase_ = mapped_key.replace("*" , __lowerCAmelCase )
if "weight_g" in name:
UpperCamelCase_ = '''weight_g'''
elif "weight_v" in name:
UpperCamelCase_ = '''weight_v'''
elif "bias" in name and "relative_attention_bias" not in name:
UpperCamelCase_ = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase_ = '''weight'''
else:
UpperCamelCase_ = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int:
UpperCamelCase_ = full_name.split("conv_layers." )[-1]
UpperCamelCase_ = name.split("." )
UpperCamelCase_ = int(items[0] )
UpperCamelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
UpperCamelCase_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
UpperCamelCase_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
UpperCamelCase_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
UpperCamelCase_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ) -> Dict:
UpperCamelCase_ = torch.load(__lowerCAmelCase )
UpperCamelCase_ = WavLMConfigOrig(checkpoint["cfg"] )
UpperCamelCase_ = WavLMOrig(__lowerCAmelCase )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
UpperCamelCase_ = WavLMConfig.from_pretrained(__lowerCAmelCase )
else:
UpperCamelCase_ = WavLMConfig()
UpperCamelCase_ = WavLMModel(__lowerCAmelCase )
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase )
hf_wavlm.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_UpperCAmelCase = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
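# Example invocation (the script name and paths here are hypothetical):
# python convert_wavlm_checkpoint.py --checkpoint_path ./WavLM-Base.pt \
# --pytorch_dump_folder_path ./wavlm-base-converted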
| 350 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_UpperCAmelCase = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_UpperCAmelCase = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_UpperCAmelCase = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
return float((preds == labels).mean() )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="binary" ) -> Tuple:
UpperCamelCase_ = simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase_ = float(fa_score(y_true=UpperCamelCase_ , y_pred=UpperCamelCase_ , average=UpperCamelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
UpperCamelCase_ = {}
for id_pred, label in zip(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase_ = F'''{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'''
UpperCamelCase_ = id_pred["prediction"]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
UpperCamelCase_ = [(pred, label)]
UpperCamelCase_ , UpperCamelCase_ = [], []
for question, preds_labels in question_map.items():
UpperCamelCase_ , UpperCamelCase_ = zip(*UpperCamelCase_ )
UpperCamelCase_ = fa_score(y_true=UpperCamelCase_ , y_pred=UpperCamelCase_ , average="macro" )
fas.append(UpperCamelCase_ )
UpperCamelCase_ = int(sum(pred == label for pred, label in preds_labels ) == len(UpperCamelCase_ ) )
ems.append(UpperCamelCase_ )
UpperCamelCase_ = float(sum(UpperCamelCase_ ) / len(UpperCamelCase_ ) )
UpperCamelCase_ = sum(UpperCamelCase_ ) / len(UpperCamelCase_ )
UpperCamelCase_ = float(fa_score(y_true=UpperCamelCase_ , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
def lowercase ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
def lowercase ( self: List[Any] ) -> int:
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] ) -> Dict:
"""simple docstring"""
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
elif self.config_name == "cb":
return acc_and_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , fa_avg="macro" )
elif self.config_name == "record":
UpperCamelCase_ = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
UpperCamelCase_ = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 328 | 0 |
from heapq import heappop, heappush
import numpy as np
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> tuple[float | int, list[tuple[int, int]]]:
UpperCamelCase_ = grid.shape
UpperCamelCase_ = [-1, 1, 0, 0]
UpperCamelCase_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
UpperCamelCase_ = [(0, source)], set()
UpperCamelCase_ = np.full((rows, cols) , np.inf )
UpperCamelCase_ = 0
UpperCamelCase_ = np.empty((rows, cols) , dtype=_UpperCAmelCase )
UpperCamelCase_ = None
while queue:
(UpperCamelCase_) = heappop(_UpperCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
UpperCamelCase_ = []
while (x, y) != source:
path.append((x, y) )
UpperCamelCase_ = predecessors[x, y]
path.append(_UpperCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_UpperCAmelCase ) ):
UpperCamelCase_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
UpperCamelCase_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_UpperCAmelCase , (dist + 1, (nx, ny)) )
UpperCamelCase_ = dist + 1
UpperCamelCase_ = (x, y)
return np.inf, []
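# A minimal usage sketch, keeping this snippet's obfuscated function name and
# assuming cells equal to 1 are walkable (per the `next_node == 1` check above):
# grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
# dist, path = lowerCAmelCase_(grid, (0, 0), (2, 0), False)
# print(dist, path) # shortest distance plus the source-to-destination cell list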
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : str = '''mgp-str'''
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[int]=[32, 128] , _SCREAMING_SNAKE_CASE: Tuple=4 , _SCREAMING_SNAKE_CASE: Optional[Any]=3 , _SCREAMING_SNAKE_CASE: Optional[int]=27 , _SCREAMING_SNAKE_CASE: Tuple=38 , _SCREAMING_SNAKE_CASE: Tuple=50257 , _SCREAMING_SNAKE_CASE: List[Any]=30522 , _SCREAMING_SNAKE_CASE: Optional[Any]=768 , _SCREAMING_SNAKE_CASE: Dict=12 , _SCREAMING_SNAKE_CASE: List[str]=12 , _SCREAMING_SNAKE_CASE: Dict=4.0 , _SCREAMING_SNAKE_CASE: int=True , _SCREAMING_SNAKE_CASE: Tuple=False , _SCREAMING_SNAKE_CASE: Tuple=1e-5 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.0 , _SCREAMING_SNAKE_CASE: Tuple=0.0 , _SCREAMING_SNAKE_CASE: List[Any]=0.0 , _SCREAMING_SNAKE_CASE: List[str]=False , _SCREAMING_SNAKE_CASE: int=0.02 , **_SCREAMING_SNAKE_CASE: Any , ) -> str:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = image_size
UpperCamelCase_ = patch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = max_token_length
UpperCamelCase_ = num_character_labels
UpperCamelCase_ = num_bpe_labels
UpperCamelCase_ = num_wordpiece_labels
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = mlp_ratio
UpperCamelCase_ = distilled
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = drop_rate
UpperCamelCase_ = qkv_bias
UpperCamelCase_ = attn_drop_rate
UpperCamelCase_ = drop_path_rate
UpperCamelCase_ = output_aa_attentions
UpperCamelCase_ = initializer_range
| 328 | 0 |
"""simple docstring"""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_UpperCAmelCase = sys.version_info >= (3, 1_0)
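# e.g. `int | None` (3.10+) is interchangeable with `Optional[int]`, which is why
# the `| None` variants of the dataclasses below are defined conditionally.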
def lowerCAmelCase_ ( UpperCamelCase_=None , UpperCamelCase_=None ) -> Optional[Any]:
return field(default_factory=lambda: default , metadata=UpperCamelCase_ )
@dataclass
class _UpperCamelCase :
_UpperCamelCase : int
_UpperCamelCase : float
_UpperCamelCase : str
_UpperCamelCase : bool
@dataclass
class _UpperCamelCase :
_UpperCamelCase : int = 4_2
_UpperCamelCase : str = field(default='''toto''' , metadata={'''help''': '''help message'''} )
@dataclass
class _UpperCamelCase :
_UpperCamelCase : bool = False
_UpperCamelCase : bool = True
_UpperCamelCase : Optional[bool] = None
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : Optional[Any] = """titi"""
_UpperCamelCase : Dict = """toto"""
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : Any = """titi"""
_UpperCamelCase : Optional[Any] = """toto"""
_UpperCamelCase : int = 4_2
@dataclass
class _UpperCamelCase :
_UpperCamelCase : BasicEnum = "toto"
def lowercase ( self: Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = BasicEnum(self.foo )
@dataclass
class _UpperCamelCase :
_UpperCamelCase : MixedTypeEnum = "toto"
def lowercase ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = MixedTypeEnum(self.foo )
@dataclass
class _UpperCamelCase :
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : Optional[float] = field(default=lowerCAmelCase_ , metadata={'''help''': '''help message'''} )
_UpperCamelCase : Optional[str] = None
_UpperCamelCase : Optional[List[str]] = list_field(default=[] )
_UpperCamelCase : Optional[List[int]] = list_field(default=[] )
@dataclass
class _UpperCamelCase :
_UpperCamelCase : List[int] = list_field(default=[] )
_UpperCamelCase : List[int] = list_field(default=[1, 2, 3] )
_UpperCamelCase : List[str] = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
_UpperCamelCase : List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class _UpperCamelCase :
_UpperCamelCase : List[int] = field()
_UpperCamelCase : str = field()
_UpperCamelCase : BasicEnum = field()
def lowercase ( self: Any ) -> Any:
"""simple docstring"""
UpperCamelCase_ = BasicEnum(self.required_enum )
@dataclass
class _UpperCamelCase :
_UpperCamelCase : int
_UpperCamelCase : "BasicEnum" = field()
_UpperCamelCase : "Optional[bool]" = None
_UpperCamelCase : "str" = field(default='''toto''' , metadata={'''help''': '''help message'''} )
_UpperCamelCase : "List[str]" = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] )
if is_python_no_less_than_3_10:
@dataclass
class _UpperCamelCase :
_UpperCamelCase : bool = False
_UpperCamelCase : bool = True
_UpperCamelCase : bool | None = None
@dataclass
class _UpperCamelCase :
_UpperCamelCase : int | None = None
_UpperCamelCase : float | None = field(default=lowerCAmelCase_ , metadata={'''help''': '''help message'''} )
_UpperCamelCase : str | None = None
_UpperCamelCase : list[str] | None = list_field(default=[] )
_UpperCamelCase : list[int] | None = list_field(default=[] )
class _UpperCamelCase ( unittest.TestCase ):
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int] ) -> Any:
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
UpperCamelCase_ = {k: v for k, v in vars(__SCREAMING_SNAKE_CASE ).items() if k != "container"}
UpperCamelCase_ = {k: v for k, v in vars(__SCREAMING_SNAKE_CASE ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , __SCREAMING_SNAKE_CASE ) and yy.get("choices" , __SCREAMING_SNAKE_CASE ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](__SCREAMING_SNAKE_CASE ) , yy["type"](__SCREAMING_SNAKE_CASE ) )
del xx["type"], yy["type"]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = HfArgumentParser(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument("--bar" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument("--baz" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument("--flag" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs="?" )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase_ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((UpperCamelCase_ ) , ) = parser.parse_args_into_dataclasses(__SCREAMING_SNAKE_CASE , look_for_args_file=__SCREAMING_SNAKE_CASE )
self.assertFalse(example.flag )
def lowercase ( self: str ) -> str:
"""simple docstring"""
UpperCamelCase_ = HfArgumentParser(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=__SCREAMING_SNAKE_CASE )
expected.add_argument("--baz" , default="toto" , type=__SCREAMING_SNAKE_CASE , help="help message" )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase ( self: int ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs="?" )
expected.add_argument("--baz" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , const=__SCREAMING_SNAKE_CASE , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=__SCREAMING_SNAKE_CASE , dest="baz" )
expected.add_argument("--opt" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__SCREAMING_SNAKE_CASE )
for dataclass_type in dataclass_types:
UpperCamelCase_ = HfArgumentParser(__SCREAMING_SNAKE_CASE )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase_ = parser.parse_args([] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , opt=__SCREAMING_SNAKE_CASE ) )
def lowercase ( self: Tuple ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = HfArgumentParser(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase_ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
UpperCamelCase_ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
UpperCamelCase_ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
UpperCamelCase_ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
UpperCamelCase_ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
UpperCamelCase_ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowercase ( self: Any ) -> List[Any]:
"""simple docstring"""
@dataclass
class _UpperCamelCase :
_UpperCamelCase : Literal["titi", "toto", 4_2] = "toto"
UpperCamelCase_ = HfArgumentParser(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase_ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
UpperCamelCase_ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
UpperCamelCase_ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def lowercase ( self: int ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = HfArgumentParser(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=__SCREAMING_SNAKE_CASE )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=__SCREAMING_SNAKE_CASE )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__SCREAMING_SNAKE_CASE )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=__SCREAMING_SNAKE_CASE )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase_ = parser.parse_args([] )
self.assertEqual(
__SCREAMING_SNAKE_CASE , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
UpperCamelCase_ = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def lowercase ( self: int ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE )
expected.add_argument("--bar" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="help message" )
expected.add_argument("--baz" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE )
expected.add_argument("--ces" , nargs="+" , default=[] , type=__SCREAMING_SNAKE_CASE )
expected.add_argument("--des" , nargs="+" , default=[] , type=__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__SCREAMING_SNAKE_CASE )
for dataclass_type in dataclass_types:
UpperCamelCase_ = HfArgumentParser(__SCREAMING_SNAKE_CASE )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase_ = parser.parse_args([] )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=__SCREAMING_SNAKE_CASE , bar=__SCREAMING_SNAKE_CASE , baz=__SCREAMING_SNAKE_CASE , ces=[] , des=[] ) )
UpperCamelCase_ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(__SCREAMING_SNAKE_CASE , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def lowercase ( self: Tuple ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = HfArgumentParser(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument("--required_str" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__SCREAMING_SNAKE_CASE , )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase ( self: Dict ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = HfArgumentParser(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__SCREAMING_SNAKE_CASE , )
expected.add_argument("--opt" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE )
expected.add_argument("--baz" , default="toto" , type=__SCREAMING_SNAKE_CASE , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__SCREAMING_SNAKE_CASE )
self.argparsersEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase ( self: Dict ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = HfArgumentParser(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
UpperCamelCase_ = parser.parse_dict(__SCREAMING_SNAKE_CASE )[0]
UpperCamelCase_ = BasicExample(**__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase ( self: int ) -> str:
"""simple docstring"""
UpperCamelCase_ = HfArgumentParser(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(__SCREAMING_SNAKE_CASE , parser.parse_dict , __SCREAMING_SNAKE_CASE , allow_extra_keys=__SCREAMING_SNAKE_CASE )
def lowercase ( self: Any ) -> int:
"""simple docstring"""
UpperCamelCase_ = HfArgumentParser(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = os.path.join(__SCREAMING_SNAKE_CASE , "temp_json" )
os.mkdir(__SCREAMING_SNAKE_CASE )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase_ = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
UpperCamelCase_ = BasicExample(**__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase ( self: int ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = HfArgumentParser(__SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_ = os.path.join(__SCREAMING_SNAKE_CASE , "temp_yaml" )
os.mkdir(__SCREAMING_SNAKE_CASE )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase_ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
UpperCamelCase_ = BasicExample(**__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def lowercase ( self: Tuple ) -> str:
"""simple docstring"""
UpperCamelCase_ = HfArgumentParser(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
| 352 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
_UpperCamelCase : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_UpperCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_UpperCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_UpperCamelCase : Optional[str] = field(
default=lowerCAmelCase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
_UpperCamelCase : bool = field(default=lowerCAmelCase_ , metadata={'''help''': '''Whether to freeze the encoder.'''} )
_UpperCamelCase : bool = field(default=lowerCAmelCase_ , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class _UpperCamelCase :
_UpperCamelCase : str = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
_UpperCamelCase : Optional[str] = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
_UpperCamelCase : Optional[int] = field(
default=1_0_2_4 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCamelCase : Optional[int] = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCamelCase : Optional[int] = field(
default=1_4_2 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
_UpperCamelCase : Optional[int] = field(
default=1_4_2 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
_UpperCamelCase : Optional[str] = field(default=lowerCAmelCase_ , metadata={'''help''': '''Source language id for translation.'''} )
_UpperCamelCase : Optional[str] = field(default=lowerCAmelCase_ , metadata={'''help''': '''Target language id for translation.'''} )
_UpperCamelCase : Optional[int] = field(default=lowerCAmelCase_ , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
_UpperCamelCase : bool = field(
default=lowerCAmelCase_ , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]:
logger.info(F'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(F''' {key} = {metrics[key]}''' )
save_json(UpperCamelCase_ , os.path.join(UpperCamelCase_ , F'''{split}_results.json''' ) )
def lowerCAmelCase_ ( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = parser.parse_args_into_dataclasses()
check_output_dir(UpperCamelCase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , UpperCamelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase_ = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
assert hasattr(UpperCamelCase_ , UpperCamelCase_ ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(UpperCamelCase_ , UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
UpperCamelCase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(UpperCamelCase_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
UpperCamelCase_ = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(UpperCamelCase_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase_ = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
UpperCamelCase_ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(UpperCamelCase_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
UpperCamelCase_ = SeqaSeqDataset
# Get datasets
UpperCamelCase_ = (
dataset_class(
UpperCamelCase_ , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
UpperCamelCase_ = (
dataset_class(
UpperCamelCase_ , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
UpperCamelCase_ = (
dataset_class(
UpperCamelCase_ , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
UpperCamelCase_ = (
build_compute_metrics_fn(data_args.task , UpperCamelCase_ ) if training_args.predict_with_generate else None
)
UpperCamelCase_ = SeqaSeqTrainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , data_args=UpperCamelCase_ , train_dataset=UpperCamelCase_ , eval_dataset=UpperCamelCase_ , data_collator=SeqaSeqDataCollator(
UpperCamelCase_ , UpperCamelCase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=UpperCamelCase_ , tokenizer=UpperCamelCase_ , )
UpperCamelCase_ = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
UpperCamelCase_ = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
UpperCamelCase_ = train_result.metrics
UpperCamelCase_ = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , UpperCamelCase_ , training_args.output_dir )
all_metrics.update(UpperCamelCase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCamelCase_ = trainer.evaluate(metric_key_prefix="val" )
UpperCamelCase_ = data_args.n_val
UpperCamelCase_ = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , UpperCamelCase_ , training_args.output_dir )
all_metrics.update(UpperCamelCase_ )
if training_args.do_predict:
logger.info("*** Predict ***" )
UpperCamelCase_ = trainer.predict(test_dataset=UpperCamelCase_ , metric_key_prefix="test" )
UpperCamelCase_ = test_output.metrics
UpperCamelCase_ = data_args.n_test
if trainer.is_world_process_zero():
UpperCamelCase_ = round(metrics["test_loss"] , 4 )
handle_metrics("test" , UpperCamelCase_ , training_args.output_dir )
all_metrics.update(UpperCamelCase_ )
if training_args.predict_with_generate:
UpperCamelCase_ = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
UpperCamelCase_ = lmap(str.strip , UpperCamelCase_ )
write_txt_file(UpperCamelCase_ , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(UpperCamelCase_ , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Optional[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 328 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_UpperCAmelCase = logging.get_logger(__name__)
class _UpperCamelCase ( snake_case_ ):
_UpperCamelCase : Optional[Any] = ['''input_features''']
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: str=80 , _SCREAMING_SNAKE_CASE: List[Any]=16000 , _SCREAMING_SNAKE_CASE: Tuple=160 , _SCREAMING_SNAKE_CASE: int=30 , _SCREAMING_SNAKE_CASE: Optional[Any]=400 , _SCREAMING_SNAKE_CASE: str=0.0 , _SCREAMING_SNAKE_CASE: int=False , **_SCREAMING_SNAKE_CASE: int , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(
feature_size=_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , padding_value=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = n_fft
UpperCamelCase_ = hop_length
UpperCamelCase_ = chunk_length
UpperCamelCase_ = chunk_length * sampling_rate
UpperCamelCase_ = self.n_samples // hop_length
UpperCamelCase_ = sampling_rate
UpperCamelCase_ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=_SCREAMING_SNAKE_CASE , norm="slaney" , mel_scale="slaney" , )
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Tuple ) -> np.ndarray:
"""simple docstring"""
UpperCamelCase_ = spectrogram(
_SCREAMING_SNAKE_CASE , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
UpperCamelCase_ = log_spec[:, :-1]
UpperCamelCase_ = np.maximum(_SCREAMING_SNAKE_CASE , log_spec.max() - 8.0 )
UpperCamelCase_ = (log_spec + 4.0) / 4.0
return log_spec
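# Note: clamping to (max - 8.0) and then applying (x + 4.0) / 4.0 is the
# Whisper-style dynamic-range compression; assuming the peak log10 mel energy
# of typical speech sits near 0, the resulting features land roughly in [-1, 1].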
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowercase ( _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[int] = 0.0 ) -> List[np.ndarray]:
"""simple docstring"""
if attention_mask is not None:
UpperCamelCase_ = np.array(_SCREAMING_SNAKE_CASE , np.intaa )
UpperCamelCase_ = []
for vector, length in zip(_SCREAMING_SNAKE_CASE , attention_mask.sum(-1 ) ):
UpperCamelCase_ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
UpperCamelCase_ = padding_value
normed_input_values.append(_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase_ = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
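# When an attention mask is supplied, each example is standardized with
# statistics computed over its real (unpadded) length and the padded tail is
# reset to `padding_value`; the 1e-7 term is a floor that avoids division by
# zero on silent inputs.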
def __call__( self: int , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Dict = True , _SCREAMING_SNAKE_CASE: str = None , _SCREAMING_SNAKE_CASE: List[str] = None , _SCREAMING_SNAKE_CASE: Optional[int] = None , _SCREAMING_SNAKE_CASE: List[Any] = "max_length" , _SCREAMING_SNAKE_CASE: int = None , _SCREAMING_SNAKE_CASE: str = None , _SCREAMING_SNAKE_CASE: List[Any] = None , **_SCREAMING_SNAKE_CASE: Dict , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
UpperCamelCase_ = isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
UpperCamelCase_ = is_batched_numpy or (
isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase_ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ):
UpperCamelCase_ = np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa )
elif isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase_ = [np.asarray([raw_speech] ).T]
UpperCamelCase_ = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
UpperCamelCase_ = self.pad(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , max_length=max_length if max_length else self.n_samples , truncation=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
UpperCamelCase_ = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
UpperCamelCase_ = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
UpperCamelCase_ = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
UpperCamelCase_ = [self._np_extract_fbank_features(_SCREAMING_SNAKE_CASE ) for waveform in input_features[0]]
if isinstance(input_features[0] , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = [np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in input_features]
else:
UpperCamelCase_ = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
UpperCamelCase_ = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
UpperCamelCase_ = padded_inputs.convert_to_tensors(_SCREAMING_SNAKE_CASE )
return padded_inputs
def lowercase ( self: Union[str, Any] ) -> Dict[str, Any]:
"""simple docstring"""
UpperCamelCase_ = copy.deepcopy(self.__dict__ )
UpperCamelCase_ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 353 |
def lowerCAmelCase_ ( UpperCamelCase_ ) -> list:
UpperCamelCase_ = int(UpperCamelCase_ )
if n_element < 1:
UpperCamelCase_ = ValueError("n_element should be a positive number" )
raise my_error
UpperCamelCase_ = [1]
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = (0, 0, 0)
UpperCamelCase_ = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
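# Minimal sanity check (classic 5-smooth Hamming sequence, stated for
# reference rather than taken from this file):
# hamming(10) -> [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]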
if __name__ == "__main__":
_UpperCAmelCase = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
_UpperCAmelCase = hamming(int(n))
print('-----------------------------------------------------')
print(f'''The list with nth numbers is: {hamming_numbers}''')
print('-----------------------------------------------------')
| 328 | 0 |
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 354 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = IFImgaImgSuperResolutionPipeline
_UpperCamelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
_UpperCamelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
_UpperCamelCase : List[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowercase ( self: List[str] ) -> Any:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Optional[int]=0 ) -> List[Any]:
"""simple docstring"""
if str(_SCREAMING_SNAKE_CASE ).startswith("mps" ):
UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = floats_tensor((1, 3, 16, 16) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase ( self: Any ) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowercase ( self: int ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def lowercase ( self: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase ( self: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase ( self: Dict ) -> Any:
"""simple docstring"""
self._test_save_load_local()
def lowercase ( self: Any ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 328 | 0 |
"""simple docstring"""
from math import factorial, radians
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ = 18 , UpperCamelCase_ = 10 ) -> float:
UpperCamelCase_ = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
UpperCamelCase_ = radians(_A )
UpperCamelCase_ = angle_in_radians
UpperCamelCase_ = 3
UpperCamelCase_ = -1
for _ in range(_A ):
result += (b * (angle_in_radians**a)) / factorial(_A )
UpperCamelCase_ = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(_A , _A )
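# Expected behaviour sketch (standard trigonometric identities, assuming the
# default 18 Taylor terms and rounding to 10 places):
# calling the function above with angle_in_degrees=30 returns 0.5
# calling it with angle_in_degrees=90 returns 1.0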
if __name__ == "__main__":
__import__('doctest').testmod()
| 355 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_UpperCAmelCase = {'UserAgent': UserAgent().random}
def lowerCAmelCase_ ( UpperCamelCase_ ) -> dict:
UpperCamelCase_ = script.contents[0]
UpperCamelCase_ = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class _UpperCamelCase :
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: str ) -> str:
"""simple docstring"""
UpperCamelCase_ = f'''https://www.instagram.com/{username}/'''
UpperCamelCase_ = self.get_json()
def lowercase ( self: Union[str, Any] ) -> dict:
"""simple docstring"""
UpperCamelCase_ = requests.get(self.url , headers=_SCREAMING_SNAKE_CASE ).text
UpperCamelCase_ = BeautifulSoup(_SCREAMING_SNAKE_CASE , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self: Tuple ) -> str:
"""simple docstring"""
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self: List[Any] ) -> str:
"""simple docstring"""
return f'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def lowercase ( self: List[str] ) -> str:
"""simple docstring"""
return self.user_data["username"]
@property
def lowercase ( self: int ) -> str:
"""simple docstring"""
return self.user_data["full_name"]
@property
def lowercase ( self: List[Any] ) -> str:
"""simple docstring"""
return self.user_data["biography"]
@property
def lowercase ( self: List[Any] ) -> str:
"""simple docstring"""
return self.user_data["business_email"]
@property
def lowercase ( self: List[Any] ) -> str:
"""simple docstring"""
return self.user_data["external_url"]
@property
def lowercase ( self: List[Any] ) -> int:
"""simple docstring"""
return self.user_data["edge_followed_by"]["count"]
@property
def lowercase ( self: List[str] ) -> int:
"""simple docstring"""
return self.user_data["edge_follow"]["count"]
@property
def lowercase ( self: List[str] ) -> int:
"""simple docstring"""
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def lowercase ( self: List[str] ) -> str:
"""simple docstring"""
return self.user_data["profile_pic_url_hd"]
@property
def lowercase ( self: Optional[int] ) -> bool:
"""simple docstring"""
return self.user_data["is_verified"]
@property
def lowercase ( self: List[str] ) -> bool:
"""simple docstring"""
return self.user_data["is_private"]
def lowerCAmelCase_ ( UpperCamelCase_ = "github" ) -> None:
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
UpperCamelCase_ = InstagramUser(UpperCamelCase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , UpperCamelCase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = InstagramUser('github')
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 328 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class _UpperCamelCase ( snake_case_ ):
_UpperCamelCase : int = '''lilt'''
def __init__( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: int=30522 , _SCREAMING_SNAKE_CASE: List[str]=768 , _SCREAMING_SNAKE_CASE: Optional[int]=12 , _SCREAMING_SNAKE_CASE: Any=12 , _SCREAMING_SNAKE_CASE: Dict=3072 , _SCREAMING_SNAKE_CASE: List[str]="gelu" , _SCREAMING_SNAKE_CASE: Dict=0.1 , _SCREAMING_SNAKE_CASE: List[Any]=0.1 , _SCREAMING_SNAKE_CASE: Union[str, Any]=512 , _SCREAMING_SNAKE_CASE: Dict=2 , _SCREAMING_SNAKE_CASE: Tuple=0.02 , _SCREAMING_SNAKE_CASE: Optional[int]=1e-12 , _SCREAMING_SNAKE_CASE: str=0 , _SCREAMING_SNAKE_CASE: List[Any]="absolute" , _SCREAMING_SNAKE_CASE: List[Any]=None , _SCREAMING_SNAKE_CASE: Optional[int]=4 , _SCREAMING_SNAKE_CASE: Tuple=1024 , **_SCREAMING_SNAKE_CASE: int , ) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = hidden_act
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = position_embedding_type
UpperCamelCase_ = classifier_dropout
UpperCamelCase_ = channel_shrink_ratio
UpperCamelCase_ = max_ad_position_embeddings
| 356 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_UpperCAmelCase = False
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = 'ybelkada/fonts'
def lowerCAmelCase_ ( ) -> Dict:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
"Pix2StructImageProcessor. Please upgrade torch." )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
requires_backends(UpperCamelCase_ , ["torch"] )
_check_torch_version()
UpperCamelCase_ = image_tensor.unsqueeze(0 )
UpperCamelCase_ = torch.nn.functional.unfold(UpperCamelCase_ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
UpperCamelCase_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCamelCase_ , UpperCamelCase_ , -1 )
UpperCamelCase_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
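# Shape walkthrough (hypothetical sizes, not from the source): for a 3x64x48
# image with 16x16 patches, unfold produces 4 * 3 = 12 patches of
# 3 * 16 * 16 = 768 values each, so the returned tensor has shape
# [1, 4, 3, 768], i.e. [batch, rows, columns, flattened patch].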
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ = 36 , UpperCamelCase_ = "black" , UpperCamelCase_ = "white" , UpperCamelCase_ = 5 , UpperCamelCase_ = 5 , UpperCamelCase_ = 5 , UpperCamelCase_ = 5 , UpperCamelCase_ = None , UpperCamelCase_ = None , ) -> Image.Image:
requires_backends(UpperCamelCase_ , "vision" )
# Add new lines so that each line is no more than 80 characters.
UpperCamelCase_ = textwrap.TextWrapper(width=80 )
UpperCamelCase_ = wrapper.wrap(text=UpperCamelCase_ )
UpperCamelCase_ = "\n".join(UpperCamelCase_ )
if font_bytes is not None and font_path is None:
UpperCamelCase_ = io.BytesIO(UpperCamelCase_ )
elif font_path is not None:
UpperCamelCase_ = font_path
else:
UpperCamelCase_ = hf_hub_download(UpperCamelCase_ , "Arial.TTF" )
UpperCamelCase_ = ImageFont.truetype(UpperCamelCase_ , encoding="UTF-8" , size=UpperCamelCase_ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
UpperCamelCase_ = ImageDraw.Draw(Image.new("RGB" , (1, 1) , UpperCamelCase_ ) )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = temp_draw.textbbox((0, 0) , UpperCamelCase_ , UpperCamelCase_ )
# Create the actual image with a bit of padding around the text.
UpperCamelCase_ = text_width + left_padding + right_padding
UpperCamelCase_ = text_height + top_padding + bottom_padding
UpperCamelCase_ = Image.new("RGB" , (image_width, image_height) , UpperCamelCase_ )
UpperCamelCase_ = ImageDraw.Draw(UpperCamelCase_ )
draw.text(xy=(left_padding, top_padding) , text=UpperCamelCase_ , fill=UpperCamelCase_ , font=UpperCamelCase_ )
return image
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) -> Union[str, Any]:
requires_backends(UpperCamelCase_ , "vision" )
# Convert to PIL image if necessary
UpperCamelCase_ = to_pil_image(UpperCamelCase_ )
UpperCamelCase_ = render_text(UpperCamelCase_ , **UpperCamelCase_ )
UpperCamelCase_ = max(header_image.width , image.width )
UpperCamelCase_ = int(image.height * (new_width / image.width) )
UpperCamelCase_ = int(header_image.height * (new_width / header_image.width) )
UpperCamelCase_ = Image.new("RGB" , (new_width, new_height + new_header_height) , "white" )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
UpperCamelCase_ = to_numpy_array(UpperCamelCase_ )
if infer_channel_dimension_format(UpperCamelCase_ ) == ChannelDimension.LAST:
UpperCamelCase_ = to_channel_dimension_format(UpperCamelCase_ , ChannelDimension.LAST )
return new_image
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : str = ['''flattened_patches''']
def __init__( self: List[Any] , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: bool = True , _SCREAMING_SNAKE_CASE: Dict[str, int] = None , _SCREAMING_SNAKE_CASE: int = 2048 , _SCREAMING_SNAKE_CASE: bool = False , **_SCREAMING_SNAKE_CASE: Optional[Any] , ) -> None:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = patch_size if patch_size is not None else {"height": 16, "width": 16}
UpperCamelCase_ = do_normalize
UpperCamelCase_ = do_convert_rgb
UpperCamelCase_ = max_patches
UpperCamelCase_ = is_vqa
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: dict , **_SCREAMING_SNAKE_CASE: Union[str, Any] ) -> np.ndarray:
"""simple docstring"""
requires_backends(self.extract_flattened_patches , "torch" )
_check_torch_version()
# convert to torch
UpperCamelCase_ = to_channel_dimension_format(_SCREAMING_SNAKE_CASE , ChannelDimension.FIRST )
UpperCamelCase_ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ , UpperCamelCase_ = patch_size["height"], patch_size["width"]
UpperCamelCase_ , UpperCamelCase_ = get_image_size(_SCREAMING_SNAKE_CASE )
    # maximize scale s.t. the resized image contains at most max_patches patches of size (patch_height, patch_width)
UpperCamelCase_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
UpperCamelCase_ = max(min(math.floor(scale * image_height / patch_height ) , _SCREAMING_SNAKE_CASE ) , 1 )
UpperCamelCase_ = max(min(math.floor(scale * image_width / patch_width ) , _SCREAMING_SNAKE_CASE ) , 1 )
UpperCamelCase_ = max(num_feasible_rows * patch_height , 1 )
UpperCamelCase_ = max(num_feasible_cols * patch_width , 1 )
UpperCamelCase_ = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="bilinear" , align_corners=_SCREAMING_SNAKE_CASE , antialias=_SCREAMING_SNAKE_CASE , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
UpperCamelCase_ = torch_extract_patches(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = patches.shape
UpperCamelCase_ = patches_shape[1]
UpperCamelCase_ = patches_shape[2]
UpperCamelCase_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
UpperCamelCase_ = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
UpperCamelCase_ = torch.arange(_SCREAMING_SNAKE_CASE ).reshape([rows, 1] ).repeat(1 , _SCREAMING_SNAKE_CASE ).reshape([rows * columns, 1] )
UpperCamelCase_ = torch.arange(_SCREAMING_SNAKE_CASE ).reshape([1, columns] ).repeat(_SCREAMING_SNAKE_CASE , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
UpperCamelCase_ = row_ids.to(torch.floataa )
UpperCamelCase_ = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
UpperCamelCase_ = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
UpperCamelCase_ = torch.nn.functional.pad(_SCREAMING_SNAKE_CASE , [0, 0, 0, max_patches - (rows * columns)] ).float()
UpperCamelCase_ = to_numpy_array(_SCREAMING_SNAKE_CASE )
return result
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE: List[str] ) -> np.ndarray:
"""simple docstring"""
if image.dtype == np.uinta:
UpperCamelCase_ = image.astype(np.floataa )
# take mean across the whole `image`
UpperCamelCase_ = np.mean(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = np.std(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = max(_SCREAMING_SNAKE_CASE , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
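# The std floor of 1 / sqrt(num_pixels) keeps the division stable on
# near-constant images; this mirrors TensorFlow's per_image_standardization.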
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: ImageInput , _SCREAMING_SNAKE_CASE: Optional[str] = None , _SCREAMING_SNAKE_CASE: bool = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[int] = None , _SCREAMING_SNAKE_CASE: Optional[Dict[str, int]] = None , _SCREAMING_SNAKE_CASE: Optional[Union[str, TensorType]] = None , _SCREAMING_SNAKE_CASE: ChannelDimension = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE: List[Any] , ) -> ImageInput:
"""simple docstring"""
UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase_ = patch_size if patch_size is not None else self.patch_size
UpperCamelCase_ = max_patches if max_patches is not None else self.max_patches
UpperCamelCase_ = self.is_vqa
if kwargs.get("data_format" , _SCREAMING_SNAKE_CASE ) is not None:
raise ValueError("data_format is not an accepted input as the outputs are " )
UpperCamelCase_ = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase_ = [convert_to_rgb(_SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase_ = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("A header text must be provided for VQA models." )
UpperCamelCase_ = kwargs.pop("font_bytes" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = kwargs.pop("font_path" , _SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = [header_text] * len(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [
render_header(_SCREAMING_SNAKE_CASE , header_text[i] , font_bytes=_SCREAMING_SNAKE_CASE , font_path=_SCREAMING_SNAKE_CASE )
for i, image in enumerate(_SCREAMING_SNAKE_CASE )
]
if do_normalize:
UpperCamelCase_ = [self.normalize(image=_SCREAMING_SNAKE_CASE ) for image in images]
# convert to torch tensor and permute
UpperCamelCase_ = [
self.extract_flattened_patches(image=_SCREAMING_SNAKE_CASE , max_patches=_SCREAMING_SNAKE_CASE , patch_size=_SCREAMING_SNAKE_CASE )
for image in images
]
# create attention mask in numpy
UpperCamelCase_ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
UpperCamelCase_ = BatchFeature(
data={"flattened_patches": images, "attention_mask": attention_masks} , tensor_type=_SCREAMING_SNAKE_CASE )
return encoded_outputs
| 328 | 0 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCamelCase ( __lowerCAmelCase , unittest.TestCase ):
_UpperCamelCase : List[str] = FunnelTokenizer
_UpperCamelCase : List[str] = FunnelTokenizerFast
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : Dict = True
def lowercase ( self: Any ) -> str:
"""simple docstring"""
super().setUp()
UpperCamelCase_ = [
'''<unk>''',
'''<cls>''',
'''<sep>''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowercase ( self: List[Any] , **_SCREAMING_SNAKE_CASE: Any ) -> int:
"""simple docstring"""
return FunnelTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowercase ( self: str , **_SCREAMING_SNAKE_CASE: str ) -> Optional[int]:
"""simple docstring"""
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = '''UNwant\u00E9d,running'''
UpperCamelCase_ = '''unwanted, running'''
return input_text, output_text
def lowercase ( self: List[Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.tokenizer_class(self.vocab_file )
UpperCamelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowerCamelCase__ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def lowercase ( self: int ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
UpperCamelCase_ = tokenizer("UNwant\u00E9d,running" )
UpperCamelCase_ = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )
UpperCamelCase_ = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
| 357 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self: Any , _SCREAMING_SNAKE_CASE: int = 768 , ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase_ = nn.Parameter(torch.zeros(1 , _SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = nn.Parameter(torch.ones(1 , _SCREAMING_SNAKE_CASE ) )
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Optional[Union[str, torch.device]] = None , _SCREAMING_SNAKE_CASE: Optional[torch.dtype] = None , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = nn.Parameter(self.mean.to(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) )
UpperCamelCase_ = nn.Parameter(self.std.to(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) )
return self
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Dict ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = (embeds * self.std) + self.mean
return embeds
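# Usage sketch (assuming the default 768-dim embedding): the first method
# standardizes an embedding as (embeds - mean) / std and the second undoes it
# as embeds * std + mean, so round-tripping through both is lossless.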
| 328 | 0 |
"""simple docstring"""
_UpperCAmelCase = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
UpperCamelCase_ = set()
# keep track of all the paths to be checked
UpperCamelCase_ = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
UpperCamelCase_ = queue.pop(0 )
# get the last node from the path
UpperCamelCase_ = path[-1]
if node not in explored:
UpperCamelCase_ = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
UpperCamelCase_ = list(A__ )
new_path.append(A__ )
queue.append(A__ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(A__ )
# in case there's no path between the 2 nodes
return []
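# Because BFS expands the frontier level by level, the first path that reaches
# `goal` necessarily uses the fewest edges in an unweighted graph, so no
# comparison against later candidate paths is needed.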
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
UpperCamelCase_ = [start]
UpperCamelCase_ = set(A__ )
# Keep tab on distances from `start` node.
UpperCamelCase_ = {start: 0, target: -1}
while queue:
UpperCamelCase_ = queue.pop(0 )
if node == target:
UpperCamelCase_ = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(A__ )
queue.append(A__ )
UpperCamelCase_ = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 358 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_UpperCAmelCase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_UpperCAmelCase = logging.getLogger()
def lowerCAmelCase_ ( ) -> Optional[int]:
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("-f" )
UpperCamelCase_ = parser.parse_args()
return args.f
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_="eval" ) -> Any:
UpperCamelCase_ = os.path.join(UpperCamelCase_ , F'''{split}_results.json''' )
if os.path.exists(UpperCamelCase_ ):
with open(UpperCamelCase_ , "r" ) as f:
return json.load(UpperCamelCase_ )
raise ValueError(F'''can\'t find {path}''' )
_UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCamelCase ( lowerCAmelCase_ ):
def lowercase ( self: Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_flax_glue.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
@slow
def lowercase ( self: int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_clm_flax.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
def lowercase ( self: Any ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_summarization_flax.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
def lowercase ( self: str ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_mlm_flax.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
def lowercase ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_ta_mlm_flax.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_accuracy"] , 0.42 )
@slow
def lowercase ( self: str ) -> int:
"""simple docstring"""
UpperCamelCase_ = 7 if get_gpu_count() > 1 else 2
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_flax_ner.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
def lowercase ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.get_auto_remove_tmp_dir()
UpperCamelCase_ = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(_SCREAMING_SNAKE_CASE , "argv" , _SCREAMING_SNAKE_CASE ):
run_qa.main()
UpperCamelCase_ = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["eval_f1"] , 30 )
self.assertGreaterEqual(result["eval_exact"] , 30 )
| 328 | 0 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=1024 ) -> str:
UpperCamelCase_ , UpperCamelCase_ = [], []
UpperCamelCase_ = list(zip(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCamelCase_ , UpperCamelCase_ = sorted_examples[0]
def is_too_big(UpperCamelCase_ ):
return tok(_UpperCAmelCase , return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
UpperCamelCase_ = new_src + ' ' + src
UpperCamelCase_ = new_tgt + ' ' + tgt
if is_too_big(_UpperCAmelCase ) or is_too_big(_UpperCAmelCase ): # cant fit, finalize example
finished_src.append(_UpperCAmelCase )
finished_tgt.append(_UpperCAmelCase )
UpperCamelCase_ , UpperCamelCase_ = src, tgt
else: # can fit, keep adding
UpperCamelCase_ , UpperCamelCase_ = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(_UpperCAmelCase )
finished_tgt.append(_UpperCAmelCase )
return finished_src, finished_tgt
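# Behaviour sketch (illustrative numbers, not from the source): with
# max_tokens=8, consecutive pairs are greedily joined with a space until the
# next pair would push either the packed source or the packed target past 8
# tokens, at which point the current pack is flushed and that pair starts a
# new one.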
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
UpperCamelCase_ = Path(_UpperCAmelCase )
save_path.mkdir(exist_ok=_UpperCAmelCase )
for split in ["train"]:
UpperCamelCase_ , UpperCamelCase_ = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
UpperCamelCase_ = [x.rstrip() for x in Path(_UpperCAmelCase ).open().readlines()]
UpperCamelCase_ = [x.rstrip() for x in Path(_UpperCAmelCase ).open().readlines()]
UpperCamelCase_ , UpperCamelCase_ = pack_examples(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
print(F'''packed {split} split from {len(_UpperCAmelCase )} examples -> {len(_UpperCAmelCase )}.''' )
Path(save_path / F'''{split}.source''' ).open("w" ).write("\n".join(_UpperCAmelCase ) )
Path(save_path / F'''{split}.target''' ).open("w" ).write("\n".join(_UpperCAmelCase ) )
for split in ["val", "test"]:
UpperCamelCase_ , UpperCamelCase_ = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
shutil.copyfile(_UpperCAmelCase , save_path / F'''{split}.source''' )
shutil.copyfile(_UpperCAmelCase , save_path / F'''{split}.target''' )
def lowerCAmelCase_ ( ) -> int:
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=_UpperCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=_UpperCAmelCase , default=128 )
parser.add_argument("--data_dir" , type=_UpperCAmelCase )
parser.add_argument("--save_path" , type=_UpperCAmelCase )
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(_UpperCAmelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 359 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
for param in module.parameters():
UpperCamelCase_ = False
def lowerCAmelCase_ ( ) -> Dict:
UpperCamelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCamelCase_ = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def lowerCAmelCase_ ( UpperCamelCase_ ) -> Union[str, Any]:
UpperCamelCase_ = plt.imshow(UpperCamelCase_ )
fig.axes.get_xaxis().set_visible(UpperCamelCase_ )
fig.axes.get_yaxis().set_visible(UpperCamelCase_ )
plt.show()
def lowerCAmelCase_ ( ) -> List[str]:
UpperCamelCase_ = datetime.now()
UpperCamelCase_ = current_time.strftime("%H:%M:%S" )
return timestamp
| 328 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _UpperCamelCase :
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Union[str, Any]=13 , _SCREAMING_SNAKE_CASE: Dict=7 , _SCREAMING_SNAKE_CASE: Optional[Any]=True , _SCREAMING_SNAKE_CASE: List[str]=True , _SCREAMING_SNAKE_CASE: int=False , _SCREAMING_SNAKE_CASE: Any=True , _SCREAMING_SNAKE_CASE: int=99 , _SCREAMING_SNAKE_CASE: Any=32 , _SCREAMING_SNAKE_CASE: Dict=5 , _SCREAMING_SNAKE_CASE: str=4 , _SCREAMING_SNAKE_CASE: List[Any]=37 , _SCREAMING_SNAKE_CASE: List[str]="gelu" , _SCREAMING_SNAKE_CASE: Union[str, Any]=0.1 , _SCREAMING_SNAKE_CASE: Union[str, Any]=0.1 , _SCREAMING_SNAKE_CASE: int=512 , _SCREAMING_SNAKE_CASE: List[Any]=16 , _SCREAMING_SNAKE_CASE: Tuple=2 , _SCREAMING_SNAKE_CASE: Optional[Any]=0.02 , _SCREAMING_SNAKE_CASE: Optional[Any]=3 , _SCREAMING_SNAKE_CASE: str=4 , _SCREAMING_SNAKE_CASE: Optional[int]=None , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = num_choices
UpperCamelCase_ = scope
def lowercase ( self: Union[str, Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = None
if self.use_token_type_ids:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self: Any ) -> Union[str, Any]:
"""simple docstring"""
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Any ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = LlamaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
UpperCamelCase_ = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Tuple , ) -> str:
"""simple docstring"""
UpperCamelCase_ = True
UpperCamelCase_ = LlamaModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , )
UpperCamelCase_ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Any , ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = LlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Dict , ) -> Any:
"""simple docstring"""
UpperCamelCase_ = True
UpperCamelCase_ = True
UpperCamelCase_ = LlamaForCausalLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
# first forward pass
UpperCamelCase_ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase , )
UpperCamelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase_ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0]
UpperCamelCase_ = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0]
# select random slice
UpperCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def lowercase ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.prepare_config_and_inputs()
( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) = config_and_inputs
UpperCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
_UpperCamelCase : Tuple = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_UpperCamelCase : Optional[int] = (LlamaForCausalLM,) if is_torch_available() else ()
_UpperCamelCase : Optional[Any] = (
{
'''feature-extraction''': LlamaModel,
'''text-classification''': LlamaForSequenceClassification,
'''text-generation''': LlamaForCausalLM,
'''zero-shot''': LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : List[str] = False
def lowercase ( self: int ) -> int:
"""simple docstring"""
UpperCamelCase_ = LlamaModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def lowercase ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase ( self: str ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowercase ( self: Union[str, Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase_ = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def lowercase ( self: Any ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = 3
UpperCamelCase_ = input_dict["input_ids"]
UpperCamelCase_ = input_ids.ne(1 ).to(__UpperCamelCase )
UpperCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase_ = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase ( self: Optional[int] ) -> str:
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = 3
UpperCamelCase_ = "single_label_classification"
UpperCamelCase_ = input_dict["input_ids"]
UpperCamelCase_ = input_ids.ne(1 ).to(__UpperCamelCase )
UpperCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCamelCase_ = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = 3
UpperCamelCase_ = "multi_label_classification"
UpperCamelCase_ = input_dict["input_ids"]
UpperCamelCase_ = input_ids.ne(1 ).to(__UpperCamelCase )
UpperCamelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCamelCase_ = LlamaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
UpperCamelCase_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("LLaMA buffers include complex numbers, which breaks this test" )
def lowercase ( self: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = ids_tensor([1, 10] , config.vocab_size )
UpperCamelCase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase_ = LlamaModel(__UpperCamelCase )
original_model.to(__UpperCamelCase )
original_model.eval()
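# run the unscaled model on the short input, then on the long input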
UpperCamelCase_ = original_model(__UpperCamelCase ).last_hidden_state
UpperCamelCase_ = original_model(__UpperCamelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase_ = {"type": scaling_type, "factor": 10.0}
UpperCamelCase_ = LlamaModel(__UpperCamelCase )
scaled_model.to(__UpperCamelCase )
scaled_model.eval()
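# run the scaled model on the same short and long inputs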
UpperCamelCase_ = scaled_model(__UpperCamelCase ).last_hidden_state
UpperCamelCase_ = scaled_model(__UpperCamelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def lowercase ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338]
UpperCamelCase_ = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" )
UpperCamelCase_ = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
UpperCamelCase_ = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCamelCase_ = torch.tensor([-12.8281, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.8281, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def lowercase ( self: str ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338]
UpperCamelCase_ = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" )
UpperCamelCase_ = model(torch.tensor(__UpperCamelCase ) )
# Expected mean on dim = -1
UpperCamelCase_ = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCamelCase_ = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" )
@slow
def lowercase ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338]
UpperCamelCase_ = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" )
UpperCamelCase_ = model(torch.tensor(__UpperCamelCase ) )
# Expected mean on dim = -1
UpperCamelCase_ = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCamelCase_ = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
"Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test" )
@slow
def lowercase ( self: List[str] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = [1, 306, 4658, 278, 6593, 310, 2834, 338]
UpperCamelCase_ = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" )
UpperCamelCase_ = model(torch.tensor(__UpperCamelCase ) )
UpperCamelCase_ = torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1e-2 , rtol=1e-2 )
# fmt: off
UpperCamelCase_ = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1e-5 , rtol=1e-5 )
@unittest.skip("Model is curently gated" )
@slow
def lowercase ( self: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
UpperCamelCase_ = "Simply put, the theory of relativity states that "
UpperCamelCase_ = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" )
UpperCamelCase_ = tokenizer.encode(__UpperCamelCase , return_tensors="pt" )
UpperCamelCase_ = LlamaForCausalLM.from_pretrained(
"meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=__UpperCamelCase )
# greedy generation outputs
UpperCamelCase_ = model.generate(__UpperCamelCase , max_new_tokens=64 , top_p=__UpperCamelCase , temperature=1 , do_sample=__UpperCamelCase )
UpperCamelCase_ = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
| 360 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = '▁'
_UpperCAmelCase = {'vocab_file': 'spiece.model'}
_UpperCAmelCase = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
_UpperCAmelCase = {
'google/pegasus-xsum': 5_1_2,
}
_UpperCAmelCase = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase_ ):
_UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[int] = ['''input_ids''', '''attention_mask''']
def __init__( self: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: str="<pad>" , _SCREAMING_SNAKE_CASE: Optional[Any]="</s>" , _SCREAMING_SNAKE_CASE: Any="<unk>" , _SCREAMING_SNAKE_CASE: int="<mask_2>" , _SCREAMING_SNAKE_CASE: List[Any]="<mask_1>" , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: Optional[int]=103 , _SCREAMING_SNAKE_CASE: Optional[Dict[str, Any]] = None , **_SCREAMING_SNAKE_CASE: Dict , ) -> None:
"""simple docstring"""
UpperCamelCase_ = offset
if additional_special_tokens is not None:
if not isinstance(_SCREAMING_SNAKE_CASE , list ):
raise TypeError(
f'''additional_special_tokens should be of type list, but is'''
f''' {type(_SCREAMING_SNAKE_CASE )}''' )
UpperCamelCase_ = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(_SCREAMING_SNAKE_CASE ) , self.offset - 1 )
]
if len(set(_SCREAMING_SNAKE_CASE ) ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
UpperCamelCase_ = additional_special_tokens_extended
else:
UpperCamelCase_ = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
UpperCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token_sent=_SCREAMING_SNAKE_CASE , offset=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = mask_token_sent
UpperCamelCase_ = vocab_file
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
# add special tokens to encoder dict
UpperCamelCase_ = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
UpperCamelCase_ = {v: k for k, v in self.encoder.items()}
@property
def lowercase ( self: Dict ) -> int:
"""simple docstring"""
return len(self.sp_model ) + self.offset
def lowercase ( self: int ) -> Dict[str, int]:
"""simple docstring"""
UpperCamelCase_ = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.__dict__.copy()
UpperCamelCase_ = None
return state
def __setstate__( self: List[Any] , _SCREAMING_SNAKE_CASE: List[Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCamelCase_ = {}
UpperCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: str ) -> int:
"""simple docstring"""
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
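# sentencepiece ids are shifted by self.offset to make room for the reserved special tokens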
UpperCamelCase_ = self.sp_model.piece_to_id(_SCREAMING_SNAKE_CASE )
return sp_id + self.offset
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: int ) -> str:
"""simple docstring"""
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
UpperCamelCase_ = self.sp_model.IdToPiece(index - self.offset )
return token
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = []
UpperCamelCase_ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
UpperCamelCase_ = []
else:
current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
return out_string.strip()
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Optional[int]=False ) -> Union[str, Any]:
"""simple docstring"""
return 1
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: str ) -> str:
"""simple docstring"""
UpperCamelCase_ = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: List , _SCREAMING_SNAKE_CASE: Optional[List] = None , _SCREAMING_SNAKE_CASE: bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(_SCREAMING_SNAKE_CASE )
elif token_ids_a is None:
return self._special_token_mask(_SCREAMING_SNAKE_CASE ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[Any]=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase_ = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , "wb" ) as fi:
UpperCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 328 | 0 |
import math
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]:
return math.pow(UpperCamelCase_ , 2 ) - a
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
return 2 * x
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
UpperCamelCase_ = 2.0
while start <= a:
UpperCamelCase_ = math.pow(UpperCamelCase_ , 2 )
return start
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ = 9999 , UpperCamelCase_ = 0.00_00_00_00_00_00_01 ) -> List[Any]:
if a < 0:
raise ValueError("math domain error" )
UpperCamelCase_ = get_initial_point(UpperCamelCase_ )
for _ in range(UpperCamelCase_ ):
UpperCamelCase_ = value
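# Newton's update: x_new = x - f(x) / f'(x), with f(x) = x**2 - a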
UpperCamelCase_ = value - fx(UpperCamelCase_ , UpperCamelCase_ ) / fx_derivative(UpperCamelCase_ )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
| 361 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'],
'tokenization_tapas': ['TapasTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TapasForMaskedLM',
'TapasForQuestionAnswering',
'TapasForSequenceClassification',
'TapasModel',
'TapasPreTrainedModel',
'load_tf_weights_in_tapas',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFTapasForMaskedLM',
'TFTapasForQuestionAnswering',
'TFTapasForSequenceClassification',
'TFTapasModel',
'TFTapasPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 328 | 0 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _UpperCamelCase ( UpperCamelCase_ , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = RoFormerTokenizer
_UpperCamelCase : List[Any] = RoFormerTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : Dict = True
def lowercase ( self: List[Any] ) -> List[Any]:
"""simple docstring"""
super().setUp()
def lowercase ( self: Union[str, Any] , **_SCREAMING_SNAKE_CASE: List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **_a )
def lowercase ( self: List[str] , **_SCREAMING_SNAKE_CASE: int ) -> List[Any]:
"""simple docstring"""
return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **_a )
def lowercase ( self: int ) -> str:
"""simple docstring"""
UpperCamelCase_ = """永和服装饰品有限公司,今天天气非常好"""
UpperCamelCase_ = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def lowercase ( self: Dict ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = self.get_chinese_input_output_texts()
UpperCamelCase_ = tokenizer.tokenize(_a )
self.assertListEqual(_a , output_text.split() )
UpperCamelCase_ = tokens + [tokenizer.unk_token]
UpperCamelCase_ = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
def lowercase ( self: Any ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.get_rust_tokenizer()
UpperCamelCase_ = self.get_chinese_input_output_texts()
UpperCamelCase_ = tokenizer.tokenize(_a )
self.assertListEqual(_a , output_text.split() )
UpperCamelCase_ = tokens + [tokenizer.unk_token]
UpperCamelCase_ = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
def lowercase ( self: List[str] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase ( self: List[str] ) -> List[Any]:
"""simple docstring"""
pass
def lowercase ( self: Any ) -> Optional[int]:
"""simple docstring"""
pass
| 362 |
import argparse
import json
from tqdm import tqdm
def lowerCAmelCase_ ( ) -> Tuple:
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=UpperCamelCase_ , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=UpperCamelCase_ , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=UpperCamelCase_ , help="where to store parsed gold_data_path file" , )
UpperCamelCase_ = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
UpperCamelCase_ = json.load(UpperCamelCase_ )
for dpr_record in tqdm(UpperCamelCase_ ):
UpperCamelCase_ = dpr_record["question"]
UpperCamelCase_ = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(UpperCamelCase_ ) + "\n" )
if __name__ == "__main__":
main()
| 328 | 0 |
import datasets
from .evaluate import evaluate
_UpperCAmelCase = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_UpperCAmelCase = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_UpperCAmelCase = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
def lowercase ( self: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
UpperCamelCase_ = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
UpperCamelCase_ = evaluate(dataset=__SCREAMING_SNAKE_CASE , predictions=__SCREAMING_SNAKE_CASE )
return score
| 363 |
import requests
from bs4 import BeautifulSoup
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> str:
UpperCamelCase_ = BeautifulSoup(requests.get(UpperCamelCase_ , params=UpperCamelCase_ ).content , "html.parser" )
UpperCamelCase_ = soup.find("div" , attrs={"class": "gs_ri"} )
UpperCamelCase_ = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_UpperCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 3_0,
'pages': '3979-3990',
'year': 2_0_1_8,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 328 | 0 |
import argparse
import struct
import unittest
class _UpperCamelCase :
def __init__( self: Tuple , _SCREAMING_SNAKE_CASE: bytes ) -> None:
"""simple docstring"""
UpperCamelCase_ = data
# Initialize hash values
UpperCamelCase_ = [
0X6A09E667,
0XBB67AE85,
0X3C6EF372,
0XA54FF53A,
0X510E527F,
0X9B05688C,
0X1F83D9AB,
0X5BE0CD19,
]
# Initialize round constants
UpperCamelCase_ = [
0X428A2F98,
0X71374491,
0XB5C0FBCF,
0XE9B5DBA5,
0X3956C25B,
0X59F111F1,
0X923F82A4,
0XAB1C5ED5,
0XD807AA98,
0X12835B01,
0X243185BE,
0X550C7DC3,
0X72BE5D74,
0X80DEB1FE,
0X9BDC06A7,
0XC19BF174,
0XE49B69C1,
0XEFBE4786,
0X0FC19DC6,
0X240CA1CC,
0X2DE92C6F,
0X4A7484AA,
0X5CB0A9DC,
0X76F988DA,
0X983E5152,
0XA831C66D,
0XB00327C8,
0XBF597FC7,
0XC6E00BF3,
0XD5A79147,
0X06CA6351,
0X14292967,
0X27B70A85,
0X2E1B2138,
0X4D2C6DFC,
0X53380D13,
0X650A7354,
0X766A0ABB,
0X81C2C92E,
0X92722C85,
0XA2BFE8A1,
0XA81A664B,
0XC24B8B70,
0XC76C51A3,
0XD192E819,
0XD6990624,
0XF40E3585,
0X106AA070,
0X19A4C116,
0X1E376C08,
0X2748774C,
0X34B0BCB5,
0X391C0CB3,
0X4ED8AA4A,
0X5B9CCA4F,
0X682E6FF3,
0X748F82EE,
0X78A5636F,
0X84C87814,
0X8CC70208,
0X90BEFFFA,
0XA4506CEB,
0XBEF9A3F7,
0XC67178F2,
]
UpperCamelCase_ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def lowercase ( _SCREAMING_SNAKE_CASE: bytes ) -> bytes:
"""simple docstring"""
UpperCamelCase_ = b"\x80" + (b"\x00" * (63 - (len(_SCREAMING_SNAKE_CASE ) + 8) % 64))
UpperCamelCase_ = struct.pack(">Q" , (len(_SCREAMING_SNAKE_CASE ) * 8) )
return data + padding + big_endian_integer
def lowercase ( self: Tuple ) -> None:
"""simple docstring"""
UpperCamelCase_ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
UpperCamelCase_ = list(struct.unpack(">16L" , _SCREAMING_SNAKE_CASE ) )
# add 48 0-ed integers
words += [0] * 48
UpperCamelCase_ = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
UpperCamelCase_ = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
UpperCamelCase_ = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
UpperCamelCase_ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X100000000
# Compression
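# one SHA-256 round: S1 = ror(e,6) ^ ror(e,11) ^ ror(e,25), ch = (e & f) ^ (~e & g),
# temp1 = h + S1 + ch + k[i] + w[i], S0 = ror(a,2) ^ ror(a,13) ^ ror(a,22), maj = (a & b) ^ (a & c) ^ (b & c)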
UpperCamelCase_ = self.ror(_SCREAMING_SNAKE_CASE , 6 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 11 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 25 )
UpperCamelCase_ = (e & f) ^ ((~e & 0XFFFFFFFF) & g)
UpperCamelCase_ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X100000000
UpperCamelCase_ = self.ror(_SCREAMING_SNAKE_CASE , 2 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 13 ) ^ self.ror(_SCREAMING_SNAKE_CASE , 22 )
UpperCamelCase_ = (a & b) ^ (a & c) ^ (b & c)
UpperCamelCase_ = (sa + maj) % 0X100000000
UpperCamelCase_ = (
g,
f,
e,
((d + tempa) % 0X100000000),
c,
b,
a,
((tempa + tempa) % 0X100000000),
)
UpperCamelCase_ = [a, b, c, d, e, f, g, h]
# Modify final values
UpperCamelCase_ = [
((element + mutated_hash_values[index]) % 0X100000000)
for index, element in enumerate(self.hashes )
]
UpperCamelCase_ = "".join([hex(_SCREAMING_SNAKE_CASE )[2:].zfill(8 ) for value in self.hashes] )
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: int ) -> int:
"""simple docstring"""
return 0XFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class _UpperCamelCase ( unittest.TestCase ):
def lowercase ( self: Tuple ) -> None:
"""simple docstring"""
import hashlib
UpperCamelCase_ = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(_SCREAMING_SNAKE_CASE ).hash , hashlib.sha256(_SCREAMING_SNAKE_CASE ).hexdigest() )
def lowerCAmelCase_ ( ) -> None:
import doctest
doctest.testmod()
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument(
"-f" , "--file" , dest="input_file" , help="Hash contents of a file" )
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
UpperCamelCase_ = f.read()
else:
UpperCamelCase_ = bytes(__snake_case , "utf-8" )
print(SHAaaa(__snake_case ).hash )
if __name__ == "__main__":
main()
| 364 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ):
@register_to_config
def __init__( self: List[str] , *,
_SCREAMING_SNAKE_CASE: int = 4 , _SCREAMING_SNAKE_CASE: int = 768 , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , ) -> Tuple:
"""simple docstring"""
super().__init__()
UpperCamelCase_ = nn.Parameter(torch.zeros(_SCREAMING_SNAKE_CASE ) )
# parameters for additional clip time embeddings
UpperCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# parameters for encoder hidden states
UpperCamelCase_ = clip_extra_context_tokens
UpperCamelCase_ = nn.Linear(
_SCREAMING_SNAKE_CASE , self.clip_extra_context_tokens * cross_attention_dim )
UpperCamelCase_ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = nn.LayerNorm(_SCREAMING_SNAKE_CASE )
def lowercase ( self: Optional[int] , *, _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple ) -> str:
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
UpperCamelCase_ = image_embeddings.shape[0]
UpperCamelCase_ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
UpperCamelCase_ = classifier_free_guidance_embeddings.expand(
_SCREAMING_SNAKE_CASE , -1 )
UpperCamelCase_ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
UpperCamelCase_ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
UpperCamelCase_ = self.embedding_proj(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.clip_image_embeddings_project_to_time_embeddings(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
UpperCamelCase_ = self.clip_extra_context_tokens_proj(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = clip_extra_context_tokens.reshape(_SCREAMING_SNAKE_CASE , -1 , self.clip_extra_context_tokens )
UpperCamelCase_ = clip_extra_context_tokens.permute(0 , 2 , 1 )
UpperCamelCase_ = self.encoder_hidden_states_proj(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.text_encoder_hidden_states_norm(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 328 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
def lowercase ( self: Optional[Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ = tempfile.mkdtemp()
# fmt: off
UpperCamelCase_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCamelCase_ = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
UpperCamelCase_ = os.path.join(self.tmpdirname , lowerCamelCase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
def lowercase ( self: List[str] , **_SCREAMING_SNAKE_CASE: int ) -> Union[str, Any]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowercase ( self: Any , **_SCREAMING_SNAKE_CASE: int ) -> str:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowercase ( self: Optional[int] ) -> Any:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase ( self: Dict ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase_ = [Image.fromarray(np.moveaxis(lowerCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase ( self: Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase_ )
def lowercase ( self: Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCamelCase_ = self.get_image_processor(do_normalize=lowerCamelCase_ , padding_value=1.0 )
UpperCamelCase_ = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase_ )
def lowercase ( self: Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = image_processor(lowerCamelCase_ , return_tensors="np" )
UpperCamelCase_ = processor(images=lowerCamelCase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase ( self: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
UpperCamelCase_ = """lower newer"""
UpperCamelCase_ = processor(text=lowerCamelCase_ )
UpperCamelCase_ = tokenizer(lowerCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase ( self: List[str] ) -> str:
"""simple docstring"""
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
UpperCamelCase_ = """lower newer"""
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(lowerCamelCase_ ):
processor()
def lowercase ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
UpperCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_ = processor.batch_decode(lowerCamelCase_ )
UpperCamelCase_ = tokenizer.batch_decode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowercase ( self: Any ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.get_image_processor()
UpperCamelCase_ = self.get_tokenizer()
UpperCamelCase_ = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase_ , image_processor=lowerCamelCase_ )
UpperCamelCase_ = """lower newer"""
UpperCamelCase_ = self.prepare_image_inputs()
UpperCamelCase_ = processor(text=lowerCamelCase_ , images=lowerCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 365 |
from functools import lru_cache
def lowerCAmelCase_ ( UpperCamelCase_ ) -> set:
UpperCamelCase_ = 2
UpperCamelCase_ = set()
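# trial division: it suffices to test divisors up to sqrt(n)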
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(UpperCamelCase_ )
if n > 1:
factors.add(UpperCamelCase_ )
return factors
@lru_cache
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
return len(unique_prime_factors(UpperCamelCase_ ) )
def lowerCAmelCase_ ( UpperCamelCase_ ) -> bool:
return len(set(UpperCamelCase_ ) ) in (0, 1)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> list:
UpperCamelCase_ = 2
while True:
# Increment each value of a generated range
UpperCamelCase_ = [base + i for i in range(UpperCamelCase_ )]
# Run elements through out unique_prime_factors function
# Append our target number to the end.
UpperCamelCase_ = [upf_len(UpperCamelCase_ ) for x in group]
checker.append(UpperCamelCase_ )
# If all numbers in the list are equal, return the group variable.
if equality(UpperCamelCase_ ):
return group
# Increment our base variable by 1
base += 1
def lowerCAmelCase_ ( UpperCamelCase_ = 4 ) -> int:
UpperCamelCase_ = run(UpperCamelCase_ )
return results[0] if len(UpperCamelCase_ ) else None
if __name__ == "__main__":
print(solution())
| 328 | 0 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : Tuple = GPTSanJapaneseTokenizer
_UpperCamelCase : List[Any] = False
_UpperCamelCase : List[Any] = {"""do_clean_text""": False, """add_prefix_space""": False}
def lowercase ( self: str ) -> List[Any]:
"""simple docstring"""
super().setUp()
# fmt: off
UpperCamelCase_ = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase_ = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀
UpperCamelCase_ = {"unk_token": "<unk>"}
UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.emoji_file , "w" ) as emoji_writer:
emoji_writer.write(json.dumps(_lowercase ) )
def lowercase ( self: Union[str, Any] , **_SCREAMING_SNAKE_CASE: Dict ) -> List[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: List[Any] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = "こんにちは、世界。 \nこんばんは、㔺界。😀"
UpperCamelCase_ = "こんにちは、世界。 \nこんばんは、世界。😀"
return input_text, output_text
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] ) -> int:
"""simple docstring"""
UpperCamelCase_ , UpperCamelCase_ = self.get_input_output_texts(_lowercase )
UpperCamelCase_ = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
UpperCamelCase_ = tokenizer.decode(_lowercase , clean_up_tokenization_spaces=_lowercase )
return text, ids
def lowercase ( self: Any ) -> Any:
"""simple docstring"""
pass # TODO add if relevant
def lowercase ( self: Tuple ) -> Optional[int]:
"""simple docstring"""
pass # TODO add if relevant
def lowercase ( self: int ) -> Dict:
"""simple docstring"""
pass # TODO add if relevant
def lowercase ( self: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.get_tokenizer()
# Testing tokenization
UpperCamelCase_ = "こんにちは、世界。 こんばんは、㔺界。"
UpperCamelCase_ = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
UpperCamelCase_ = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
# Testing conversion to ids without special tokens
UpperCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
UpperCamelCase_ = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
# Testing conversion to ids with special tokens
UpperCamelCase_ = tokens + [tokenizer.unk_token]
UpperCamelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
UpperCamelCase_ = tokenizer.convert_tokens_to_ids(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def lowercase ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.get_tokenizer()
# Testing tokenization
UpperCamelCase_ = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
UpperCamelCase_ = "こんにちは、、、、世界。こんばんは、、、、世界。"
UpperCamelCase_ = tokenizer.encode(_lowercase )
UpperCamelCase_ = tokenizer.decode(_lowercase )
self.assertEqual(_lowercase , _lowercase )
@slow
def lowercase ( self: str ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
UpperCamelCase_ = "こんにちは、世界。"
UpperCamelCase_ = "こんばんは、㔺界。😀"
UpperCamelCase_ = "こんにちは、世界。こんばんは、世界。😀"
UpperCamelCase_ = tokenizer.encode(prefix_text + input_text )
UpperCamelCase_ = tokenizer.encode("" , prefix_text=prefix_text + input_text )
UpperCamelCase_ = tokenizer.encode(_lowercase , prefix_text=_lowercase )
UpperCamelCase_ = tokenizer.decode(_lowercase )
UpperCamelCase_ = tokenizer.decode(_lowercase )
UpperCamelCase_ = tokenizer.decode(_lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
@slow
def lowercase ( self: Dict ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
UpperCamelCase_ = "こんにちは、世界。"
UpperCamelCase_ = "こんばんは、㔺界。😀"
UpperCamelCase_ = len(tokenizer.encode(_lowercase ) ) - 2
UpperCamelCase_ = len(tokenizer.encode(_lowercase ) ) - 2
UpperCamelCase_ = [1] + [0] * (len_prefix + len_text + 1)
UpperCamelCase_ = [1] * (len_prefix + len_text + 1) + [0]
UpperCamelCase_ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
UpperCamelCase_ = tokenizer(prefix_text + input_text ).token_type_ids
UpperCamelCase_ = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
UpperCamelCase_ = tokenizer(_lowercase , prefix_text=_lowercase ).token_type_ids
self.assertListEqual(_lowercase , _lowercase )
self.assertListEqual(_lowercase , _lowercase )
self.assertListEqual(_lowercase , _lowercase )
@slow
def lowercase ( self: str ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
UpperCamelCase_ = tokenizer.encode("あンいワ" )
UpperCamelCase_ = tokenizer.encode("" , prefix_text="あンいワ" )
UpperCamelCase_ = tokenizer.encode("いワ" , prefix_text="あン" )
self.assertEqual(tokenizer.decode(_lowercase ) , tokenizer.decode(_lowercase ) )
self.assertEqual(tokenizer.decode(_lowercase ) , tokenizer.decode(_lowercase ) )
self.assertNotEqual(_lowercase , _lowercase )
self.assertNotEqual(_lowercase , _lowercase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def lowercase ( self: int ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
UpperCamelCase_ = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
UpperCamelCase_ = tokenizer(_lowercase , padding=_lowercase )
UpperCamelCase_ = tokenizer.batch_encode_plus(_lowercase , padding=_lowercase )
# fmt: off
UpperCamelCase_ = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
UpperCamelCase_ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
UpperCamelCase_ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , _lowercase )
self.assertListEqual(x_token.token_type_ids , _lowercase )
self.assertListEqual(x_token.attention_mask , _lowercase )
self.assertListEqual(x_token_a.input_ids , _lowercase )
self.assertListEqual(x_token_a.token_type_ids , _lowercase )
self.assertListEqual(x_token_a.attention_mask , _lowercase )
def lowercase ( self: Optional[int] ) -> Dict:
"""simple docstring"""
pass
def lowercase ( self: List[str] ) -> Any:
"""simple docstring"""
pass
| 366 |
def lowerCAmelCase_ ( UpperCamelCase_ ) -> int:
UpperCamelCase_ = len(UpperCamelCase_ )
UpperCamelCase_ = len(matrix[0] )
UpperCamelCase_ = min(UpperCamelCase_ , UpperCamelCase_ )
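# Gaussian elimination: a pivot column that stays zero after trying row swaps reduces the rank by one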
for row in range(UpperCamelCase_ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , UpperCamelCase_ ):
UpperCamelCase_ = matrix[col][row] / matrix[row][row]
for i in range(UpperCamelCase_ , UpperCamelCase_ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
UpperCamelCase_ = True
for i in range(row + 1 , UpperCamelCase_ ):
if matrix[i][row] != 0:
UpperCamelCase_ , UpperCamelCase_ = matrix[i], matrix[row]
UpperCamelCase_ = False
break
if reduce:
rank -= 1
for i in range(UpperCamelCase_ ):
UpperCamelCase_ = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 328 | 0 |
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ = False ) -> List[Any]:
if not isinstance(SCREAMING_SNAKE_CASE__ , str ):
UpperCamelCase_ = F'''Expected string as input, found {type(SCREAMING_SNAKE_CASE__ )}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
if not isinstance(SCREAMING_SNAKE_CASE__ , bool ):
UpperCamelCase_ = F'''Expected boolean as use_pascal parameter, found {type(SCREAMING_SNAKE_CASE__ )}'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
UpperCamelCase_ = input_str.split("_" )
UpperCamelCase_ = 0 if use_pascal else 1
UpperCamelCase_ = words[start_index:]
UpperCamelCase_ = [word[0].upper() + word[1:] for word in words_to_capitalize]
UpperCamelCase_ = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 367 |
import math
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
if 0 not in (x, y):
# We use the relation log10(x^y) = y*log10(x), where 10 is the base.
return y * math.log10(UpperCamelCase_ )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError("This should never happen" )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
_UpperCAmelCase = 'Enter the base and the power separated by a comma: '
_UpperCAmelCase , _UpperCAmelCase = map(int, input(prompt).split(','))
_UpperCAmelCase , _UpperCAmelCase = map(int, input(prompt).split(','))
# We find the log of each number, using the function res(), which takes two
# arguments.
_UpperCAmelCase = res(xa, ya)
_UpperCAmelCase = res(xa, ya)
# We check for the largest number
if resa > resa:
print('Largest number is', xa, '^', ya)
elif resa > resa:
print('Largest number is', xa, '^', ya)
else:
print('Both are equal')
| 328 | 0 |
from __future__ import annotations
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> list[int]:
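# two-pointer scan: assumes nums is sorted in ascending order, moves inward from both ends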
UpperCamelCase_ = 0
UpperCamelCase_ = len(UpperCamelCase_ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
UpperCamelCase_ = i + 1
else:
UpperCamelCase_ = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 1_1, 1_5], 9) = }''')
| 368 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
_UpperCAmelCase = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> List[Any]:
if isinstance(UpperCamelCase_ , torch.Tensor ):
return image
elif isinstance(UpperCamelCase_ , PIL.Image.Image ):
UpperCamelCase_ = [image]
UpperCamelCase_ = [trans(img.convert("RGB" ) ) for img in image]
UpperCamelCase_ = torch.stack(UpperCamelCase_ )
return image
class _UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self: List[Any] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Dict ) -> str:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCamelCase_ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE )
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: Dict ) -> Optional[Any]:
"""simple docstring"""
if strength < 0 or strength > 1:
            raise ValueError(f'''The value of strength should be in [0.0, 1.0] but is {strength}''' )
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] ) -> int:
"""simple docstring"""
UpperCamelCase_ = min(int(num_inference_steps * strength ) , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase_ = self.scheduler.timesteps[t_start:]
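        # Worked example: num_inference_steps=50 with strength=0.8 gives
        # init_timestep=40 and t_start=10, so denoising covers the last 40
        # scheduler timesteps.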
return timesteps, num_inference_steps - t_start
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int]=None ) -> List[Any]:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_SCREAMING_SNAKE_CASE )}''' )
UpperCamelCase_ = image.to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(_SCREAMING_SNAKE_CASE )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCamelCase_ = init_latents.shape
UpperCamelCase_ = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
# get latents
print("add noise to latents at timestep" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.scheduler.add_noise(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = init_latents
return latents
@torch.no_grad()
def __call__( self: Dict , _SCREAMING_SNAKE_CASE: Union[torch.FloatTensor, PIL.Image.Image] = None , _SCREAMING_SNAKE_CASE: float = 0.8 , _SCREAMING_SNAKE_CASE: int = 1 , _SCREAMING_SNAKE_CASE: Optional[Union[torch.Generator, List[torch.Generator]]] = None , _SCREAMING_SNAKE_CASE: float = 0.0 , _SCREAMING_SNAKE_CASE: int = 50 , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[str] = "pil" , _SCREAMING_SNAKE_CASE: bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
self.check_inputs(_SCREAMING_SNAKE_CASE )
# 2. Preprocess image
UpperCamelCase_ = preprocess(_SCREAMING_SNAKE_CASE )
# 3. set timesteps
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=self.device )
UpperCamelCase_ , UpperCamelCase_ = self.get_timesteps(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.device )
UpperCamelCase_ = timesteps[:1].repeat(_SCREAMING_SNAKE_CASE )
# 4. Prepare latent variables
UpperCamelCase_ = self.prepare_latents(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.unet.dtype , self.device , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = latents
# 5. Denoising loop
for t in self.progress_bar(_SCREAMING_SNAKE_CASE ):
# 1. predict noise model_output
UpperCamelCase_ = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCamelCase_ = self.scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , use_clipped_model_output=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample
UpperCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase_ = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
| 328 | 0 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase ( self: List[str] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
UpperCamelCase_ = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCamelCase_ = tokenizer("Hello there" , return_tensors="np" ).input_ids
UpperCamelCase_ = tokenizer("Hi I am" , return_tensors="np" ).input_ids
UpperCamelCase_ = shift_tokens_right(lowercase_ , model.config.pad_token_id , model.config.decoder_start_token_id )
UpperCamelCase_ = model(lowercase_ , decoder_input_ids=lowercase_ ).logits
UpperCamelCase_ = optax.softmax_cross_entropy(lowercase_ , onehot(lowercase_ , logits.shape[-1] ) ).mean()
UpperCamelCase_ = -(labels.shape[-1] * loss.item())
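        # `loss` is the mean per-token cross-entropy, so scaling by the target
        # length recovers the (negative) sequence log-likelihood compared to
        # EXPECTED_SCORE below.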
UpperCamelCase_ = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 369 |
import re
from filelock import FileLock
try:
import nltk
_UpperCAmelCase = True
except (ImportError, ModuleNotFoundError):
_UpperCAmelCase = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowerCAmelCase_ ( UpperCamelCase_ ) -> str:
re.sub("<n>" , "" , UpperCamelCase_ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCamelCase_ ) )
| 328 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCAmelCase_ ( UpperCamelCase_ ):
UpperCamelCase_ = filter(lambda UpperCamelCase_ : p.requires_grad , model.parameters() )
UpperCamelCase_ = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_UpperCAmelCase = logging.getLogger(__name__)
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ):
if metric == "rouge2":
UpperCamelCase_ = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
UpperCamelCase_ = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
UpperCamelCase_ = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
" function." )
UpperCamelCase_ = ModelCheckpoint(
dirpath=_lowerCamelCase , filename=_lowerCamelCase , monitor=F'''val_{metric}''' , mode="max" , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ):
return EarlyStopping(
monitor=F'''val_{metric}''' , mode="min" if "loss" in metric else "max" , patience=_lowerCamelCase , verbose=_lowerCamelCase , )
class _UpperCamelCase ( pl.Callback ):
def lowercase ( self: List[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Dict ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ = {f'''lr_group_{i}''': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_SCREAMING_SNAKE_CASE )
@rank_zero_only
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: pl.Trainer , _SCREAMING_SNAKE_CASE: pl.LightningModule , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: str=True ) -> List[Any]:
"""simple docstring"""
logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
UpperCamelCase_ = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
UpperCamelCase_ = Path(pl_module.hparams.output_dir )
if type_path == "test":
UpperCamelCase_ = od / "test_results.txt"
UpperCamelCase_ = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
UpperCamelCase_ = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
UpperCamelCase_ = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
generations_file.parent.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , "a+" ) as writer:
for key in sorted(_SCREAMING_SNAKE_CASE ):
if key in ["log", "progress_bar", "preds"]:
continue
UpperCamelCase_ = metrics[key]
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
UpperCamelCase_ = val.item()
UpperCamelCase_ = f'''{key}: {val:.6f}\n'''
writer.write(_SCREAMING_SNAKE_CASE )
if not save_generations:
return
if "preds" in metrics:
UpperCamelCase_ = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(_SCREAMING_SNAKE_CASE )
@rank_zero_only
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: int ) -> Optional[int]:
"""simple docstring"""
try:
UpperCamelCase_ = pl_module.model.model.num_parameters()
except AttributeError:
UpperCamelCase_ = pl_module.model.num_parameters()
UpperCamelCase_ = count_trainable_parameters(_SCREAMING_SNAKE_CASE )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def lowercase ( self: List[str] , _SCREAMING_SNAKE_CASE: pl.Trainer , _SCREAMING_SNAKE_CASE: pl.LightningModule ) -> Dict:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , "test" )
@rank_zero_only
def lowercase ( self: int , _SCREAMING_SNAKE_CASE: pl.Trainer , _SCREAMING_SNAKE_CASE: Optional[int] ) -> Optional[int]:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 370 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DiTPipeline
_UpperCamelCase : Any = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Dict = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_UpperCamelCase : Optional[int] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Dict = False
def lowercase ( self: str ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase_ = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
UpperCamelCase_ = AutoencoderKL()
UpperCamelCase_ = DDIMScheduler()
UpperCamelCase_ = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: List[str]=0 ) -> Dict:
"""simple docstring"""
if str(_SCREAMING_SNAKE_CASE ).startswith("mps" ):
UpperCamelCase_ = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase_ = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowercase ( self: Any ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = "cpu"
UpperCamelCase_ = self.get_dummy_components()
UpperCamelCase_ = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = pipe(**_SCREAMING_SNAKE_CASE ).images
UpperCamelCase_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
UpperCamelCase_ = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] )
UpperCamelCase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 )
def lowercase ( self: Optional[int] ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase ( self: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class _UpperCamelCase ( unittest.TestCase ):
def lowercase ( self: Optional[int] ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
UpperCamelCase_ = ["vase", "umbrella", "white shark", "white wolf"]
UpperCamelCase_ = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def lowercase ( self: int ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
UpperCamelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
UpperCamelCase_ = ["vase", "umbrella"]
UpperCamelCase_ = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
| 328 | 0 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class _UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Any=13 , _SCREAMING_SNAKE_CASE: List[Any]=7 , _SCREAMING_SNAKE_CASE: List[Any]=True , _SCREAMING_SNAKE_CASE: Optional[int]=True , _SCREAMING_SNAKE_CASE: Dict=False , _SCREAMING_SNAKE_CASE: Optional[int]=True , _SCREAMING_SNAKE_CASE: Union[str, Any]=99 , _SCREAMING_SNAKE_CASE: str=32 , _SCREAMING_SNAKE_CASE: str=5 , _SCREAMING_SNAKE_CASE: Union[str, Any]=4 , _SCREAMING_SNAKE_CASE: Optional[int]=64 , _SCREAMING_SNAKE_CASE: str="gelu" , _SCREAMING_SNAKE_CASE: Tuple=0.1 , _SCREAMING_SNAKE_CASE: Dict=0.1 , _SCREAMING_SNAKE_CASE: Tuple=512 , _SCREAMING_SNAKE_CASE: Union[str, Any]=16 , _SCREAMING_SNAKE_CASE: str=2 , _SCREAMING_SNAKE_CASE: List[str]=0.02 , _SCREAMING_SNAKE_CASE: Union[str, Any]=3 , _SCREAMING_SNAKE_CASE: Tuple=4 , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: Optional[int]=2 , _SCREAMING_SNAKE_CASE: Optional[int]=2 , _SCREAMING_SNAKE_CASE: List[Any]=2 , _SCREAMING_SNAKE_CASE: List[str]=2 , _SCREAMING_SNAKE_CASE: Dict=4 , _SCREAMING_SNAKE_CASE: int=1 , ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_labels
UpperCamelCase_ = num_choices
UpperCamelCase_ = scope
UpperCamelCase_ = q_groups
UpperCamelCase_ = k_groups
UpperCamelCase_ = v_groups
UpperCamelCase_ = post_attention_groups
UpperCamelCase_ = intermediate_groups
UpperCamelCase_ = output_groups
def lowercase ( self: Any ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = None
UpperCamelCase_ = None
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self: Union[str, Any] ) -> int:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = SqueezeBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = SqueezeBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = SqueezeBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: int ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = SqueezeBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: str ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = SqueezeBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: str , _SCREAMING_SNAKE_CASE: str ) -> Dict:
"""simple docstring"""
UpperCamelCase_ = self.num_choices
UpperCamelCase_ = SqueezeBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self: Union[str, Any] ) -> int:
"""simple docstring"""
UpperCamelCase_ = self.prepare_config_and_inputs()
        (UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) = config_and_inputs
UpperCamelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
_UpperCamelCase : str = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Any = False
_UpperCamelCase : List[Any] = True
_UpperCamelCase : int = False
def lowercase ( self: Any ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = SqueezeBertModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , dim=37 )
def lowercase ( self: Dict ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase ( self: List[Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_SCREAMING_SNAKE_CASE )
def lowercase ( self: int ) -> Any:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def lowercase ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def lowercase ( self: Union[str, Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def lowercase ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def lowercase ( self: Dict ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
@slow
def lowercase ( self: List[Any] ) -> Optional[int]:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = SqueezeBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_sentencepiece
@require_tokenizers
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
@slow
def lowercase ( self: Tuple ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
UpperCamelCase_ = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
UpperCamelCase_ = model(_SCREAMING_SNAKE_CASE )[0]
UpperCamelCase_ = torch.Size((1, 3) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.tensor([[0.64_01, -0.03_49, -0.60_41]] )
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 371 |
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _UpperCamelCase :
def __init__( self: str ) -> Any:
"""simple docstring"""
UpperCamelCase_ = ""
UpperCamelCase_ = ""
UpperCamelCase_ = []
UpperCamelCase_ = 0
UpperCamelCase_ = 256
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = 0
UpperCamelCase_ = 0
def lowercase ( self: Any , _SCREAMING_SNAKE_CASE: Dict ) -> str:
"""simple docstring"""
UpperCamelCase_ = cva.imread(_SCREAMING_SNAKE_CASE , 0 )
UpperCamelCase_ = copy.deepcopy(self.img )
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
UpperCamelCase_ = np.sum(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCamelCase_ = x[i] / self.k
self.sk += prk
UpperCamelCase_ = (self.L - 1) * self.sk
            if self.rem != 0:
                UpperCamelCase_ = last % 1  # fractional part of `last`, used for the rounding below
            UpperCamelCase_ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = int(np.ma.count(self.img ) / self.img[1].size )
UpperCamelCase_ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCamelCase_ = self.img[j][i]
if num != self.last_list[num]:
UpperCamelCase_ = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
def lowercase ( self: Any ) -> Optional[Any]:
"""simple docstring"""
plt.hist(self.img.ravel() , 256 , [0, 256] )
def lowercase ( self: Tuple ) -> Union[str, Any]:
"""simple docstring"""
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
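# Hedged NumPy-only sketch of the cumulative-histogram mapping the class above
# builds pixel by pixel (`equalize_histogram_sketch` is an illustrative name,
# not taken from the source).
def equalize_histogram_sketch(img: np.ndarray) -> np.ndarray:
    hist = np.bincount(img.ravel(), minlength=256)   # pixel count per gray level
    cdf = hist.cumsum() / hist.sum()                 # running sum of pr(k), i.e. sk
    lut = np.rint((256 - 1) * cdf).astype(np.uint8)  # (L - 1) * sk, rounded
    return lut[img]                                  # remap every pixel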
if __name__ == "__main__":
    _UpperCAmelCase = os.path.join(os.path.dirname(__file__), 'image_data/input.jpg')
_UpperCAmelCase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 328 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
def __init__( self: str , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: List[Any]=7 , _SCREAMING_SNAKE_CASE: Any=3 , _SCREAMING_SNAKE_CASE: List[str]=30 , _SCREAMING_SNAKE_CASE: Optional[Any]=400 , _SCREAMING_SNAKE_CASE: str=True , _SCREAMING_SNAKE_CASE: int=None , _SCREAMING_SNAKE_CASE: Tuple=True , _SCREAMING_SNAKE_CASE: Optional[Any]=1 / 255 , _SCREAMING_SNAKE_CASE: Dict=True , _SCREAMING_SNAKE_CASE: str=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE: List[Any]=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE: Optional[Any]=True , ) -> int:
"""simple docstring"""
UpperCamelCase_ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = min_resolution
UpperCamelCase_ = max_resolution
UpperCamelCase_ = do_resize
UpperCamelCase_ = size
UpperCamelCase_ = do_rescale
UpperCamelCase_ = rescale_factor
UpperCamelCase_ = do_normalize
UpperCamelCase_ = image_mean
UpperCamelCase_ = image_std
UpperCamelCase_ = do_pad
def lowercase ( self: int ) -> List[Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: List[Any]=False ) -> List[Any]:
"""simple docstring"""
if not batched:
UpperCamelCase_ = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
UpperCamelCase_ , UpperCamelCase_ = image.size
else:
UpperCamelCase_ , UpperCamelCase_ = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase_ = int(self.size["shortest_edge"] * h / w )
UpperCamelCase_ = self.size["shortest_edge"]
elif w > h:
UpperCamelCase_ = self.size["shortest_edge"]
UpperCamelCase_ = int(self.size["shortest_edge"] * w / h )
else:
UpperCamelCase_ = self.size["shortest_edge"]
UpperCamelCase_ = self.size["shortest_edge"]
else:
UpperCamelCase_ = []
for image in image_inputs:
UpperCamelCase_ , UpperCamelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase_ = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
UpperCamelCase_ = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
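        # Example: with shortest_edge=18, a 30x40 (w x h) image maps to
        # (expected_height, expected_width) = (int(18 * 40 / 30), 18) = (24, 18).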
@require_torch
@require_vision
class _UpperCamelCase ( a_ , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DetrImageProcessor if is_vision_available() else None
def lowercase ( self: str ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = DetrImageProcessingTester(self )
@property
def lowercase ( self: Dict ) -> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "image_mean" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "image_std" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_normalize" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_rescale" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "rescale_factor" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_resize" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "size" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_pad" ) )
def lowercase ( self: Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def lowercase ( self: str ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowercase ( self: int ) -> str:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase ( self: Tuple ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_ = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase ( self: Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCamelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_ = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
UpperCamelCase_ , UpperCamelCase_ = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowercase ( self: Tuple ) -> Any:
"""simple docstring"""
UpperCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
UpperCamelCase_ = json.loads(f.read() )
UpperCamelCase_ = {"image_id": 39769, "annotations": target}
# encode them
UpperCamelCase_ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
UpperCamelCase_ = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors="pt" )
# verify pixel values
UpperCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
UpperCamelCase_ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
UpperCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
UpperCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
UpperCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
UpperCamelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
UpperCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _SCREAMING_SNAKE_CASE ) )
# verify size
UpperCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _SCREAMING_SNAKE_CASE ) )
@slow
def lowercase ( self: Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
UpperCamelCase_ = json.loads(f.read() )
UpperCamelCase_ = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
UpperCamelCase_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
UpperCamelCase_ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
UpperCamelCase_ = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors="pt" )
# verify pixel values
UpperCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
# verify area
UpperCamelCase_ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
UpperCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , _SCREAMING_SNAKE_CASE , atol=1e-3 ) )
# verify image_id
UpperCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
UpperCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
UpperCamelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , _SCREAMING_SNAKE_CASE ) )
# verify masks
UpperCamelCase_ = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
UpperCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , _SCREAMING_SNAKE_CASE ) )
# verify size
UpperCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , _SCREAMING_SNAKE_CASE ) )
| 350 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_UpperCAmelCase = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_UpperCAmelCase = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_UpperCAmelCase = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
return float((preds == labels).mean() )
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="binary" ) -> Tuple:
UpperCamelCase_ = simple_accuracy(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase_ = float(fa_score(y_true=UpperCamelCase_ , y_pred=UpperCamelCase_ , average=UpperCamelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
UpperCamelCase_ = {}
for id_pred, label in zip(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase_ = F'''{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'''
UpperCamelCase_ = id_pred["prediction"]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
UpperCamelCase_ = [(pred, label)]
UpperCamelCase_ , UpperCamelCase_ = [], []
for question, preds_labels in question_map.items():
UpperCamelCase_ , UpperCamelCase_ = zip(*UpperCamelCase_ )
UpperCamelCase_ = fa_score(y_true=UpperCamelCase_ , y_pred=UpperCamelCase_ , average="macro" )
fas.append(UpperCamelCase_ )
UpperCamelCase_ = int(sum(pred == label for pred, label in preds_labels ) == len(UpperCamelCase_ ) )
ems.append(UpperCamelCase_ )
UpperCamelCase_ = float(sum(UpperCamelCase_ ) / len(UpperCamelCase_ ) )
UpperCamelCase_ = sum(UpperCamelCase_ ) / len(UpperCamelCase_ )
UpperCamelCase_ = float(fa_score(y_true=UpperCamelCase_ , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
def lowercase ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
def lowercase ( self: List[Any] ) -> int:
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
def lowercase ( self: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: List[str] ) -> Dict:
"""simple docstring"""
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
elif self.config_name == "cb":
return acc_and_fa(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , fa_avg="macro" )
elif self.config_name == "record":
UpperCamelCase_ = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
UpperCamelCase_ = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 328 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_UpperCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
_UpperCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
_UpperCAmelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
    def _info( self ) -> datasets.MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute( self , predictions , references , min_len: int = 1 , max_len: int = 4 ):
        """simple docstring"""
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
| 351 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class _UpperCamelCase ( PretrainedConfig ):
    model_type = '''mgp-str'''
    def __init__( self , image_size=[32, 128] , patch_size=4 , num_channels=3 , max_token_length=27 , num_character_labels=38 , num_bpe_labels=50257 , num_wordpiece_labels=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1e-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_a3_attentions=False , initializer_range=0.02 , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
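A minimal usage sketch of the configuration class above (the defaults come from its `__init__` signature; upstream this corresponds to `MgpstrConfig`, which is stated here as context rather than taken from this file):
config = _UpperCamelCase(max_token_length=27, output_a3_attentions=True)
print(config.model_type)   # "mgp-str"
print(config.hidden_size)  # 768 by default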
| 328 | 0 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'vinai/phobert-base': 2_5_6,
'vinai/phobert-large': 2_5_6,
}
def get_pairs( word ):
    # Return the set of adjacent symbol pairs in a word (used by the BPE loop below).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
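# A quick sketch of what get_pairs returns on a toy symbol tuple (assuming the
# "</w>" end-of-word marker used by the tokenizer below):
#   get_pairs(("l", "o", "w", "</w>")) -> {("l", "o"), ("o", "w"), ("w", "</w>")}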
class _UpperCamelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , merges_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs ):
        """simple docstring"""
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3
        self.add_from_file(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            merges = merges_handle.read().split("\n" )[:-1]
        merges = [tuple(merge.split()[:-1] ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
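    # Layout sketch for the three methods above (PhoBERT uses RoBERTa-style
    # special tokens): a single sequence becomes `<s> A </s>`, a pair becomes
    # `<s> A </s> </s> B </s>`; the special-tokens mask marks exactly those
    # added positions with 1, and every token type id is 0.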
@property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.encoder )
    def get_vocab( self ):
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        word = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = "@@ ".join(word )
        word = word[:-4]
        self.cache[token] = word
        return word
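    # Worked sketch of bpe() under a hypothetical merge table (the ranks below
    # are illustrative, not from a real bpe.codes file):
    #   bpe_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}
    #   bpe("low") walks ("l", "o", "w</w>") -> ("lo", "w</w>") -> ("low</w>",),
    #   joins with "@@ " and strips the trailing "</w>", returning "low".
    #   With no applicable merges, "abc" would come back fully split: "a@@ b@@ c".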
    def _tokenize( self , text ):
        """simple docstring"""
        split_tokens = []
        words = re.findall(R"\S+\n?" , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ) -> str:
        """simple docstring"""
        out_string = " ".join(tokens ).replace("@@ " , "" ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix=None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        out_merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        if os.path.abspath(self.merges_file ) != os.path.abspath(out_merge_file ):
            copyfile(self.merges_file , out_merge_file )
        return out_vocab_file, out_merge_file
    def add_from_file( self , f ):
        """simple docstring"""
        if isinstance(f , str ):
            try:
                with open(f , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" " )
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
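A short usage sketch of the tokenizer above, assuming local copies of the two files (the paths are placeholders; in practice they ship with the vinai/phobert-* checkpoints):
tokenizer = _UpperCamelCase(vocab_file="vocab.txt", merges_file="bpe.codes")
tokens = tokenizer._tokenize("Xin chào")                   # BPE word pieces
ids = [tokenizer._convert_token_to_id(t) for t in tokens]  # pieces -> vocab ids
ids = tokenizer.build_inputs_with_special_tokens(ids)      # add <s> ... </s>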
| 352 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path : str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name : Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir : Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    freeze_encoder : bool = field(default=False , metadata={'''help''': '''Whether to freeze the encoder.'''} )
    freeze_embeds : bool = field(default=False , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class DataTrainingArguments:
    data_dir : str = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
    task : Optional[str] = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
    max_source_length : Optional[int] = field(
default=1_0_2_4 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
    max_target_length : Optional[int] = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
    val_max_target_length : Optional[int] = field(
default=1_4_2 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
    test_max_target_length : Optional[int] = field(
default=1_4_2 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
    n_train : Optional[int] = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
    n_val : Optional[int] = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
    n_test : Optional[int] = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
    src_lang : Optional[str] = field(default=None , metadata={'''help''': '''Source language id for translation.'''} )
    tgt_lang : Optional[str] = field(default=None , metadata={'''help''': '''Target language id for translation.'''} )
    eval_beams : Optional[int] = field(default=None , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
    ignore_pad_token_for_loss : bool = field(
        default=True , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def handle_metrics( split , metrics , output_dir ):
    logger.info(F'''***** {split} metrics *****''' )
    for key in sorted(metrics.keys() ):
        logger.info(F''' {key} = {metrics[key]}''' )
    save_json(metrics , os.path.join(output_dir , F'''{split}_results.json''' ) )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , UpperCamelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase_ = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
# use task specific params
    use_task_specific_params(model , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
# set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
        freeze_embeds(model )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
    dataset_class = Seq2SeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
    eval_dataset = (
        dataset_class(
            tokenizer , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
    test_dataset = (
        dataset_class(
            tokenizer , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
)
    trainer = Seq2SeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=Seq2SeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , UpperCamelCase_ , training_args.output_dir )
all_metrics.update(UpperCamelCase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(metric_key_prefix="val" )
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , UpperCamelCase_ , training_args.output_dir )
all_metrics.update(UpperCamelCase_ )
if training_args.do_predict:
logger.info("*** Predict ***" )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix="test" )
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test
if trainer.is_world_process_zero():
UpperCamelCase_ = round(metrics["test_loss"] , 4 )
handle_metrics("test" , UpperCamelCase_ , training_args.output_dir )
all_metrics.update(UpperCamelCase_ )
if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def _mp_fn( index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
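A sketch of how the script above might be launched (the filename is a placeholder, and the checkpoint and data directory are assumptions; the flags map onto the dataclass fields and Seq2SeqTrainingArguments defined above):
# finetune_trainer.py is a hypothetical name for this script.
python finetune_trainer.py \
    --model_name_or_path t5-small \
    --data_dir ./my_seq2seq_data \
    --output_dir ./output \
    --do_train --do_eval \
    --max_source_length 1024 --max_target_length 128 \
    --predict_with_generate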
| 328 | 0 |